// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
-// vim: ts=8 sw=2 smarttab
+// vim: ts=8 sw=2 smarttab ft=cpp
#include <errno.h>
#include <stdlib.h>
+#include <system_error>
#include <unistd.h>
#include <sstream>
+#include <string_view>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/optional.hpp>
+#include <boost/utility/in_place_factory.hpp>
+#include "include/scope_guard.h"
#include "common/Clock.h"
#include "common/armor.h"
+#include "common/errno.h"
#include "common/mime.h"
#include "common/utf8.h"
#include "common/ceph_json.h"
+#include "common/static_ptr.h"
+#include "rgw_tracer.h"
#include "rgw_rados.h"
+#include "rgw_zone.h"
#include "rgw_op.h"
#include "rgw_rest.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h"
#include "rgw_acl_swift.h"
+#include "rgw_aio_throttle.h"
#include "rgw_user.h"
#include "rgw_bucket.h"
#include "rgw_log.h"
#include "rgw_client_io.h"
#include "rgw_compression.h"
#include "rgw_role.h"
+#include "rgw_tag_s3.h"
+#include "rgw_putobj_processor.h"
+#include "rgw_crypt.h"
+#include "rgw_perf_counters.h"
+#include "rgw_notify.h"
+#include "rgw_notify_event_type.h"
+#include "rgw_sal.h"
+#include "rgw_sal_rados.h"
+
+#include "services/svc_zone.h"
+#include "services/svc_quota.h"
+#include "services/svc_sys_obj.h"
+
#include "cls/lock/cls_lock_client.h"
#include "cls/rgw/cls_rgw_client.h"
-#include "include/assert.h"
+#include "include/ceph_assert.h"
#include "compressor/Compressor.h"
-#include "rgw_acl_swift.h"
+#ifdef WITH_LTTNG
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
+#include "tracing/rgw_op.h"
+#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
+#undef TRACEPOINT_DEFINE
+#else
+#define tracepoint(...)
+#endif
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
using namespace librados;
using ceph::crypto::MD5;
+using boost::optional;
+using boost::none;
+using rgw::ARN;
+using rgw::IAM::Effect;
+using rgw::IAM::Policy;
static string mp_ns = RGW_OBJ_NS_MULTIPART;
static string shadow_ns = RGW_OBJ_NS_SHADOW;
-static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
-static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
- bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
+static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, req_info& info, const std::string& bucket_name);
static MultipartMetaFilter mp_filter;
-static int parse_range(const char *range, off_t& ofs, off_t& end, bool *partial_content)
+// this probably should belong in rgw_iam_policy_keywords; I'll get to it
+// at some point
+static constexpr auto S3_EXISTING_OBJTAG = "s3:ExistingObjectTag";
+static constexpr auto S3_RESOURCE_TAG = "s3:ResourceTag";
+static constexpr auto S3_RUNTIME_RESOURCE_VAL = "${s3:ResourceTag";
+
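+// Parse the HTTP Range header held in range_str (e.g. "bytes=0-1023" or
+// "bytes=1024-"). On success ofs/end and partial_content are set; on a
+// malformed range we either fail with -ERANGE or, when
+// rgw_ignore_get_invalid_range is enabled, fall back to serving the whole
+// object (ofs=0, end=-1).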
+int RGWGetObj::parse_range(void)
{
int r = -ERANGE;
- string s(range);
+ string rs(range_str);
string ofs_str;
string end_str;
- *partial_content = false;
+ ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
+ partial_content = false;
- size_t pos = s.find("bytes=");
+ size_t pos = rs.find("bytes=");
if (pos == string::npos) {
pos = 0;
- while (isspace(s[pos]))
+ while (isspace(rs[pos]))
pos++;
int end = pos;
- while (isalpha(s[end]))
+ while (isalpha(rs[end]))
end++;
- if (strncasecmp(s.c_str(), "bytes", end - pos) != 0)
+ if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
return 0;
- while (isspace(s[end]))
+ while (isspace(rs[end]))
end++;
- if (s[end] != '=')
+ if (rs[end] != '=')
return 0;
- s = s.substr(end + 1);
+ rs = rs.substr(end + 1);
} else {
- s = s.substr(pos + 6); /* size of("bytes=") */
+ rs = rs.substr(pos + 6); /* size of("bytes=") */
}
- pos = s.find('-');
+ pos = rs.find('-');
if (pos == string::npos)
goto done;
- *partial_content = true;
+ partial_content = true;
- ofs_str = s.substr(0, pos);
- end_str = s.substr(pos + 1);
+ ofs_str = rs.substr(0, pos);
+ end_str = rs.substr(pos + 1);
if (end_str.length()) {
end = atoll(end_str.c_str());
if (end < 0)
if (end >= 0 && end < ofs)
goto done;
- r = 0;
+ range_parsed = true;
+ return 0;
+
done:
+ if (ignore_invalid_range) {
+ partial_content = false;
+ ofs = 0;
+ end = -1;
+ range_parsed = false; // allow retry
+ r = 0;
+ }
+
return r;
}
-static int decode_policy(CephContext *cct,
+static int decode_policy(const DoutPrefixProvider *dpp,
+ CephContext *cct,
bufferlist& bl,
RGWAccessControlPolicy *policy)
{
- bufferlist::iterator iter = bl.begin();
+ auto iter = bl.cbegin();
try {
policy->decode(iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
- if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
+ if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
+ ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy";
RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
- ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
s3policy->to_xml(*_dout);
*_dout << dendl;
}
}
-static int get_user_policy_from_attr(CephContext * const cct,
- RGWRados * const store,
- map<string, bufferlist>& attrs,
- RGWAccessControlPolicy& policy /* out */)
+static int get_user_policy_from_attr(const DoutPrefixProvider *dpp,
+ CephContext * const cct,
+ map<string, bufferlist>& attrs,
+ RGWAccessControlPolicy& policy /* out */)
{
auto aiter = attrs.find(RGW_ATTR_ACL);
if (aiter != attrs.end()) {
- int ret = decode_policy(cct, aiter->second, &policy);
+ int ret = decode_policy(dpp, cct, aiter->second, &policy);
if (ret < 0) {
return ret;
}
return 0;
}
-static int get_bucket_instance_policy_from_attr(CephContext *cct,
- RGWRados *store,
- RGWBucketInfo& bucket_info,
- map<string, bufferlist>& bucket_attrs,
- RGWAccessControlPolicy *policy,
- rgw_raw_obj& obj)
+/**
+ * Get the AccessControlPolicy for an object off of disk.
+ * policy: must point to a valid RGWACL, and will be filled upon return.
+ * bucket: name of the bucket containing the object.
+ * object: name of the object to get the ACL for.
+ * Returns: 0 on success, -ERR# otherwise.
+ */
+int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp,
+ CephContext *cct,
+ rgw::sal::Store* store,
+ RGWBucketInfo& bucket_info,
+ map<string, bufferlist>& bucket_attrs,
+ RGWAccessControlPolicy *policy,
+ optional_yield y)
{
map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
if (aiter != bucket_attrs.end()) {
- int ret = decode_policy(cct, aiter->second, policy);
+ int ret = decode_policy(dpp, cct, aiter->second, policy);
if (ret < 0)
return ret;
} else {
- ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
- RGWUserInfo uinfo;
+ ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
+ std::unique_ptr<rgw::sal::User> user = store->get_user(bucket_info.owner);
/* object exists, but policy is broken */
- int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
+ int r = user->load_user(dpp, y);
if (r < 0)
return r;
- policy->create_default(bucket_info.owner, uinfo.display_name);
+ policy->create_default(bucket_info.owner, user->get_display_name());
}
return 0;
}
-static int get_obj_policy_from_attr(CephContext *cct,
- RGWRados *store,
- RGWObjectCtx& obj_ctx,
- RGWBucketInfo& bucket_info,
- map<string, bufferlist>& bucket_attrs,
- RGWAccessControlPolicy *policy,
- rgw_obj& obj)
+static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp,
+ CephContext *cct,
+ rgw::sal::Store* store,
+ RGWObjectCtx& obj_ctx,
+ RGWBucketInfo& bucket_info,
+ map<string, bufferlist>& bucket_attrs,
+ RGWAccessControlPolicy *policy,
+ string *storage_class,
+ rgw::sal::Object* obj,
+ optional_yield y)
{
bufferlist bl;
int ret = 0;
- RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
- RGWRados::Object::Read rop(&op_target);
+ std::unique_ptr<rgw::sal::Object::ReadOp> rop = obj->get_read_op(&obj_ctx);
- ret = rop.get_attr(RGW_ATTR_ACL, bl);
+ ret = rop->get_attr(dpp, RGW_ATTR_ACL, bl, y);
if (ret >= 0) {
- ret = decode_policy(cct, bl, policy);
+ ret = decode_policy(dpp, cct, bl, policy);
if (ret < 0)
return ret;
} else if (ret == -ENODATA) {
/* object exists, but policy is broken */
- ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
- RGWUserInfo uinfo;
- ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
+ ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
+ std::unique_ptr<rgw::sal::User> user = store->get_user(bucket_info.owner);
+ ret = user->load_user(dpp, y);
if (ret < 0)
return ret;
- policy->create_default(bucket_info.owner, uinfo.display_name);
+ policy->create_default(bucket_info.owner, user->get_display_name());
}
- return ret;
-}
+ if (storage_class) {
+ bufferlist scbl;
+ int r = rop->get_attr(dpp, RGW_ATTR_STORAGE_CLASS, scbl, y);
+ if (r >= 0) {
+ *storage_class = scbl.to_str();
+ } else {
+ storage_class->clear();
+ }
+ }
-/**
- * Get the AccessControlPolicy for an object off of disk.
- * policy: must point to a valid RGWACL, and will be filled upon return.
- * bucket: name of the bucket containing the object.
- * object: name of the object to get the ACL for.
- * Returns: 0 on success, -ERR# otherwise.
- */
-static int get_bucket_policy_from_attr(CephContext *cct,
- RGWRados *store,
- RGWBucketInfo& bucket_info,
- map<string, bufferlist>& bucket_attrs,
- RGWAccessControlPolicy *policy)
-{
- rgw_raw_obj instance_obj;
- store->get_bucket_instance_obj(bucket_info.bucket, instance_obj);
- return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs,
- policy, instance_obj);
+ return ret;
}
-static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
-{
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
- RGWRados::Object::Read read_op(&op_target);
-
- read_op.params.attrs = &attrs;
- read_op.params.perr = &s->err;
- return read_op.prepare();
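+// Extract the bucket policy document stored in RGW_ATTR_IAM_POLICY, if any,
+// and parse it into an rgw::IAM::Policy scoped to the given tenant.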
+static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
+ map<string, bufferlist>& attrs,
+ const string& tenant) {
+ auto i = attrs.find(RGW_ATTR_IAM_POLICY);
+ if (i != attrs.end()) {
+ return Policy(cct, tenant, i->second);
+ } else {
+ return none;
+ }
}
-static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
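+// Decode the S3 Block Public Access configuration (RGW_ATTR_PUBLIC_ACCESS)
+// from a bucket's attributes; returns boost::none if absent or undecodable.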
+static boost::optional<PublicAccessBlockConfiguration>
+get_public_access_conf_from_attr(const map<string, bufferlist>& attrs)
{
- map<string, bufferlist> attrs;
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
- RGWRados::Object::Read read_op(&op_target);
-
- read_op.params.attrs = &attrs;
- read_op.params.perr = &s->err;
-
- int r = read_op.prepare();
- if (r < 0) {
- return r;
+ if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS);
+ aiter != attrs.end()) {
+ bufferlist::const_iterator iter{&aiter->second};
+ PublicAccessBlockConfiguration access_conf;
+ try {
+ access_conf.decode(iter);
+ } catch (const buffer::error& e) {
+ return boost::none;
+ }
+ return access_conf;
}
- store->set_atomic(s->obj_ctx, read_op.state.obj);
- attrs[attr_name] = attr_val;
- return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
+ return boost::none;
}
-static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs,
- uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
-{
- RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
- RGWRados::SystemObject::Read rop(&src);
-
- rop.stat_params.attrs = &attrs;
- rop.stat_params.obj_size = obj_size;
-
- int ret = rop.stat(objv_tracker);
- return ret;
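+// Decode the per-user managed policies stored in RGW_ATTR_USER_POLICY (a
+// map of policy name to policy document) into rgw::IAM::Policy objects.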
+vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
+ map<string, bufferlist>& attrs,
+ const string& tenant) {
+ vector<Policy> policies;
+ if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) {
+ bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY];
+ map<string, string> policy_map;
+ decode(policy_map, out_bl);
+ for (auto& it : policy_map) {
+ bufferlist bl = bufferlist::static_from_string(it.second);
+ Policy p(cct, tenant, bl);
+ policies.push_back(std::move(p));
+ }
+ }
+ return policies;
}
-static int read_bucket_policy(RGWRados *store,
+static int read_bucket_policy(const DoutPrefixProvider *dpp,
+ rgw::sal::Store* store,
struct req_state *s,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
RGWAccessControlPolicy *policy,
- rgw_bucket& bucket)
+ rgw_bucket& bucket,
+ optional_yield y)
{
if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
- ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: bucket " << bucket_info.bucket.name
+ << " is suspended" << dendl;
return -ERR_USER_SUSPENDED;
}
return 0;
}
- int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
+ int ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, policy, y);
if (ret == -ENOENT) {
ret = -ERR_NO_SUCH_BUCKET;
}
return ret;
}
-static int read_obj_policy(RGWRados *store,
+static int read_obj_policy(const DoutPrefixProvider *dpp,
+ rgw::sal::Store* store,
struct req_state *s,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
- RGWAccessControlPolicy *policy,
- rgw_bucket& bucket,
- rgw_obj_key& object)
+ RGWAccessControlPolicy* acl,
+ string *storage_class,
+ boost::optional<Policy>& policy,
+ rgw::sal::Bucket* bucket,
+ rgw::sal::Object* object,
+ optional_yield y,
+ bool copy_src=false)
{
string upload_id;
upload_id = s->info.args.get("uploadId");
+ std::unique_ptr<rgw::sal::Object> mpobj;
rgw_obj obj;
if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
- ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: bucket " << bucket_info.bucket.name
+ << " is suspended" << dendl;
return -ERR_USER_SUSPENDED;
}
- if (!upload_id.empty()) {
+ // when getting policy info for copy-source obj, upload_id makes no sense.
+ // 'copy_src' is used to make this function backward compatible.
+ if (!upload_id.empty() && !copy_src) {
/* multipart upload */
- RGWMPObj mp(object.name, upload_id);
- string oid = mp.get_meta();
- obj.init_ns(bucket, oid, mp_ns);
- obj.set_in_extra_data(true);
- } else {
- obj = rgw_obj(bucket, object);
+ std::unique_ptr<rgw::sal::MultipartUpload> upload;
+ upload = bucket->get_multipart_upload(object->get_name(), upload_id);
+ mpobj = upload->get_meta_obj();
+ mpobj->set_in_extra_data(true);
+ object = mpobj.get();
}
+ policy = get_iam_policy_from_attr(s->cct, bucket_attrs, bucket->get_tenant());
+
RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
- int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
- bucket_info, bucket_attrs, policy, obj);
+ int ret = get_obj_policy_from_attr(dpp, s->cct, store, *obj_ctx,
+ bucket_info, bucket_attrs, acl, storage_class, object, s->yield);
if (ret == -ENOENT) {
/* object does not exist checking the bucket's ACL to make sure
that we send a proper error code */
RGWAccessControlPolicy bucket_policy(s->cct);
- ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
+ ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, &bucket_policy, y);
if (ret < 0) {
return ret;
}
-
const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
- if (bucket_owner.compare(s->user->user_id) != 0 &&
- ! s->auth.identity->is_admin_of(bucket_owner) &&
- ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
- RGW_PERM_READ)) {
- ret = -EACCES;
+ if (bucket_owner.compare(s->user->get_id()) != 0 &&
+ ! s->auth.identity->is_admin_of(bucket_owner)) {
+ auto r = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3ListBucket, ARN(bucket->get_key()));
+ if (r == Effect::Allow)
+ return -ENOENT;
+ if (r == Effect::Deny)
+ return -EACCES;
+ if (policy) {
+ ARN b_arn(bucket->get_key());
+ r = policy->eval(s->env, *s->auth.identity, rgw::IAM::s3ListBucket, b_arn);
+ if (r == Effect::Allow)
+ return -ENOENT;
+ if (r == Effect::Deny)
+ return -EACCES;
+ }
+ if (! s->session_policies.empty()) {
+ r = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3ListBucket, ARN(bucket->get_key()));
+ if (r == Effect::Allow)
+ return -ENOENT;
+ if (r == Effect::Deny)
+ return -EACCES;
+ }
+ if (! bucket_policy.verify_permission(s, *s->auth.identity, s->perm_mask, RGW_PERM_READ))
+ ret = -EACCES;
+ else
+ ret = -ENOENT;
} else {
ret = -ENOENT;
}
* only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
* Returns: 0 on success, -ERR# otherwise.
*/
-int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
+int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store, struct req_state* s, optional_yield y)
{
int ret = 0;
- rgw_obj_key obj;
- RGWUserInfo bucket_owner_info;
- RGWObjectCtx obj_ctx(store);
string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
if (!bi.empty()) {
- ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
+ // note: overwrites s->bucket_name, may include a tenant/
+ ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_name, &s->bucket_instance_id, &s->bucket_instance_shard_id);
if (ret < 0) {
return ret;
}
}
if(s->dialect.compare("s3") == 0) {
- s->bucket_acl = new RGWAccessControlPolicy_S3(s->cct);
+ s->bucket_acl = std::make_unique<RGWAccessControlPolicy_S3>(s->cct);
} else if(s->dialect.compare("swift") == 0) {
/* We aren't allocating the account policy for those operations using
* the Swift's infrastructure that don't really need req_state::user.
* Typical example here is the implementation of /info. */
- if (!s->user->user_id.empty()) {
- s->user_acl = std::unique_ptr<RGWAccessControlPolicy>(
- new RGWAccessControlPolicy_SWIFTAcct(s->cct));
+ if (!s->user->get_id().empty()) {
+ s->user_acl = std::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
}
- s->bucket_acl = new RGWAccessControlPolicy_SWIFT(s->cct);
+ s->bucket_acl = std::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
} else {
- s->bucket_acl = new RGWAccessControlPolicy(s->cct);
+ s->bucket_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
}
/* check if copy source is within the current domain */
if (!s->src_bucket_name.empty()) {
- RGWBucketInfo source_info;
-
- if (s->bucket_instance_id.empty()) {
- ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
- } else {
- ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
- }
+ std::unique_ptr<rgw::sal::Bucket> src_bucket;
+ ret = store->get_bucket(dpp, nullptr,
+ rgw_bucket(s->src_tenant_name,
+ s->src_bucket_name,
+ s->bucket_instance_id),
+ &src_bucket, y);
if (ret == 0) {
- string& zonegroup = source_info.zonegroup;
- s->local_source = store->get_zonegroup().equals(zonegroup);
+ string& zonegroup = src_bucket->get_info().zonegroup;
+ s->local_source = store->get_zone()->get_zonegroup().equals(zonegroup);
}
}
rgw_user uid;
std::string display_name;
} acct_acl_user = {
- s->user->user_id,
- s->user->display_name,
+ s->user->get_id(),
+ s->user->get_display_name(),
};
if (!s->bucket_name.empty()) {
s->bucket_exists = true;
- if (s->bucket_instance_id.empty()) {
- ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs);
- } else {
- ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs);
- }
+
+ /* This is the only place that s->bucket is created. It should never be
+ * overwritten. */
+ ret = store->get_bucket(dpp, s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y);
if (ret < 0) {
if (ret != -ENOENT) {
- string bucket_log;
- rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
- ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
- return ret;
+ string bucket_log;
+ bucket_log = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name);
+ ldpp_dout(dpp, 0) << "NOTICE: couldn't get bucket from bucket_name (name="
+ << bucket_log << ")" << dendl;
+ return ret;
}
s->bucket_exists = false;
+ return -ERR_NO_SUCH_BUCKET;
}
- s->bucket = s->bucket_info.bucket;
-
- if (s->bucket_exists) {
- ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs, s->bucket_acl, s->bucket);
- acct_acl_user = {
- s->bucket_info.owner,
- s->bucket_acl->get_owner().get_display_name(),
- };
- } else {
- s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
- ret = -ERR_NO_SUCH_BUCKET;
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ s->object->set_bucket(s->bucket.get());
}
+
+ s->bucket_mtime = s->bucket->get_modification_time();
+ s->bucket_attrs = s->bucket->get_attrs();
+ ret = read_bucket_policy(dpp, store, s, s->bucket->get_info(),
+ s->bucket->get_attrs(),
+ s->bucket_acl.get(), s->bucket->get_key(), y);
+ acct_acl_user = {
+ s->bucket->get_info().owner,
+ s->bucket_acl->get_owner().get_display_name(),
+ };
s->bucket_owner = s->bucket_acl->get_owner();
RGWZoneGroup zonegroup;
- int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
+ int r = store->get_zone()->get_zonegroup(s->bucket->get_info().zonegroup, zonegroup);
if (!r) {
if (!zonegroup.endpoints.empty()) {
s->zonegroup_endpoint = zonegroup.endpoints.front();
ret = r;
}
- if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
- ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
+ if (!store->get_zone()->get_zonegroup().equals(s->bucket->get_info().zonegroup)) {
+ ldpp_dout(dpp, 0) << "NOTICE: request for data in a different zonegroup ("
+ << s->bucket->get_info().zonegroup << " != "
+ << store->get_zone()->get_zonegroup().get_id() << ")" << dendl;
/* we now need to make sure that the operation actually requires copy source, that is
* it's a copy operation
*/
- if (store->get_zonegroup().is_master && s->system_request) {
+ if (store->get_zone()->get_zonegroup().is_master_zonegroup() && s->system_request) {
/*If this is the master, don't redirect*/
+ } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
+ /* If op is get bucket location, don't redirect */
} else if (!s->local_source ||
(s->op != OP_PUT && s->op != OP_COPY) ||
- s->object.empty()) {
+ rgw::sal::Object::empty(s->object.get())) {
return -ERR_PERMANENT_REDIRECT;
}
}
+
+ /* init dest placement */
+ s->dest_placement.storage_class = s->info.storage_class;
+ s->dest_placement.inherit_from(s->bucket->get_placement_rule());
+
+ if (!store->get_zone()->get_params().valid_placement(s->dest_placement)) {
+ ldpp_dout(dpp, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
+ return -EINVAL;
+ }
+
+ s->bucket_access_conf = get_public_access_conf_from_attr(s->bucket->get_attrs());
}
/* handle user ACL only for those APIs which support it */
if (s->user_acl) {
- map<string, bufferlist> uattrs;
+ std::unique_ptr<rgw::sal::User> acl_user = store->get_user(acct_acl_user.uid);
- ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
+ ret = acl_user->read_attrs(dpp, y);
if (!ret) {
- ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
+ ret = get_user_policy_from_attr(dpp, s->cct, acl_user->get_attrs(), *s->user_acl);
}
if (-ENOENT == ret) {
/* In already existing clusters users won't have ACL. In such case
s->user_acl->create_default(acct_acl_user.uid,
acct_acl_user.display_name);
ret = 0;
- } else {
- ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
- << s->user->user_id
- << ", ret="
- << ret
- << ")" << dendl;
+ } else if (ret < 0) {
+ ldpp_dout(dpp, 0) << "NOTICE: couldn't get user attrs for handling ACL "
+ "(user_id=" << s->user->get_id() << ", ret=" << ret << ")" << dendl;
return ret;
}
}
+  // We don't need user policies in the case of STS tokens returned by
+  // AssumeRole, hence the check on the identity type
+ if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
+ try {
+ ret = s->user->read_attrs(dpp, y);
+ if (ret == 0) {
+ auto user_policies = get_iam_user_policy_from_attr(s->cct,
+ s->user->get_attrs(),
+ s->user->get_tenant());
+ s->iam_user_policies.insert(s->iam_user_policies.end(),
+ std::make_move_iterator(user_policies.begin()),
+ std::make_move_iterator(user_policies.end()));
+ } else {
+ if (ret == -ENOENT)
+ ret = 0;
+ else ret = -EACCES;
+ }
+ } catch (const std::exception& e) {
+ ldpp_dout(dpp, -1) << "Error reading IAM User Policy: " << e.what() << dendl;
+ ret = -EACCES;
+ }
+ }
+
+ try {
+ s->iam_policy = get_iam_policy_from_attr(s->cct, s->bucket_attrs, s->bucket_tenant);
+ } catch (const std::exception& e) {
+ // Really this is a can't happen condition. We parse the policy
+ // when it's given to us, so perhaps we should abort or otherwise
+ // raise bloody murder.
+ ldpp_dout(dpp, 0) << "Error reading IAM Policy: " << e.what() << dendl;
+ ret = -EACCES;
+ }
+ bool success = store->get_zone()->get_redirect_endpoint(&s->redirect_zone_endpoint);
+ if (success) {
+ ldpp_dout(dpp, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
+ }
return ret;
}
* only_bucket: If true, reads the bucket ACL rather than the object ACL.
* Returns: 0 on success, -ERR# otherwise.
*/
-int rgw_build_object_policies(RGWRados *store, struct req_state *s,
- bool prefetch_data)
+int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ struct req_state *s, bool prefetch_data, optional_yield y)
{
int ret = 0;
- if (!s->object.empty()) {
+ if (!rgw::sal::Object::empty(s->object.get())) {
if (!s->bucket_exists) {
return -ERR_NO_SUCH_BUCKET;
}
- s->object_acl = new RGWAccessControlPolicy(s->cct);
+ s->object_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
- rgw_obj obj(s->bucket, s->object);
-
- store->set_atomic(s->obj_ctx, obj);
+ s->object->set_atomic(s->obj_ctx);
if (prefetch_data) {
- store->set_prefetch_data(s->obj_ctx, obj);
+ s->object->set_prefetch_data(s->obj_ctx);
}
- ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs, s->object_acl, s->bucket, s->object);
+ ret = read_obj_policy(dpp, store, s, s->bucket->get_info(), s->bucket_attrs,
+ s->object_acl.get(), nullptr, s->iam_policy, s->bucket.get(),
+ s->object.get(), y);
}
return ret;
}
-static void rgw_bucket_object_pre_exec(struct req_state *s)
-{
- if (s->expect_cont)
- dump_continue(s);
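+// Strip this object's tag entries (s3:ExistingObjectTag/<key> and
+// s3:ResourceTag/<key>) back out of the IAM environment, so tag values added
+// for one object do not leak into checks for the next.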
+static int rgw_iam_remove_objtags(const DoutPrefixProvider *dpp, struct req_state* s, rgw::sal::Object* object, bool has_existing_obj_tag, bool has_resource_tag) {
+ object->set_atomic(s->obj_ctx);
+ int op_ret = object->get_obj_attrs(s->obj_ctx, s->yield, dpp);
+ if (op_ret < 0)
+ return op_ret;
+ rgw::sal::Attrs attrs = object->get_attrs();
+ auto tags = attrs.find(RGW_ATTR_TAGS);
+ if (tags != attrs.end()) {
+ RGWObjTags tagset;
+ try {
+ auto bliter = tags->second.cbegin();
+ tagset.decode(bliter);
+ } catch (buffer::error& err) {
+ ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
+ return -EIO;
+ }
+ for (auto& tag: tagset.get_tags()) {
+ if (has_existing_obj_tag) {
+ vector<std::unordered_multimap<string, string>::iterator> iters;
+ string key = "s3:ExistingObjectTag/" + tag.first;
+ auto result = s->env.equal_range(key);
+ for (auto& it = result.first; it != result.second; ++it)
+ {
+ if (tag.second == it->second) {
+ iters.emplace_back(it);
+ }
+ }
+ for (auto& it : iters) {
+ s->env.erase(it);
+ }
+ }//end if has_existing_obj_tag
+ if (has_resource_tag) {
+ vector<std::unordered_multimap<string, string>::iterator> iters;
+ string key = "s3:ResourceTag/" + tag.first;
+ auto result = s->env.equal_range(key);
+ for (auto& it = result.first; it != result.second; ++it)
+ {
+ if (tag.second == it->second) {
+ iters.emplace_back(it);
+ }
+ }
+ for (auto& it : iters) {
+ s->env.erase(it);
+ }
+ }//end if has_resource_tag
+ }
+ }
+ return 0;
+}
- dump_bucket_from_state(s);
+void rgw_add_to_iam_environment(rgw::IAM::Environment& e, std::string_view key, std::string_view val){
+  // This variant only adds non-empty keys to the IAM env.; values can be
+  // empty in certain cases, like tagging
+ if (!key.empty())
+ e.emplace(key,val);
}
-int RGWGetObj::verify_permission()
-{
- obj = rgw_obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
- if (get_data) {
- store->set_prefetch_data(s->obj_ctx, obj);
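+// Decode a TagSet from the given bufferlist into s->tagset and publish each
+// tag as s3:ExistingObjectTag/<key> and/or s3:ResourceTag/<key> in the IAM
+// environment, depending on which condition kinds the policies reference.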
+static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl, bool has_existing_obj_tag=false, bool has_resource_tag=false){
+ RGWObjTags& tagset = s->tagset;
+ try {
+ auto bliter = bl.cbegin();
+ tagset.decode(bliter);
+ } catch (buffer::error& err) {
+ ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
+ return -EIO;
}
- if (!verify_object_permission(s, RGW_PERM_READ)) {
- return -EACCES;
+ for (const auto& tag: tagset.get_tags()){
+ if (has_existing_obj_tag)
+ rgw_add_to_iam_environment(s->env, "s3:ExistingObjectTag/" + tag.first, tag.second);
+ if (has_resource_tag)
+ rgw_add_to_iam_environment(s->env, "s3:ResourceTag/" + tag.first, tag.second);
}
+ return 0;
+}
+static int rgw_iam_add_objtags(const DoutPrefixProvider *dpp, struct req_state* s, rgw::sal::Object* object, bool has_existing_obj_tag, bool has_resource_tag) {
+ object->set_atomic(s->obj_ctx);
+ int op_ret = object->get_obj_attrs(s->obj_ctx, s->yield, dpp);
+ if (op_ret < 0)
+ return op_ret;
+ rgw::sal::Attrs attrs = object->get_attrs();
+ auto tags = attrs.find(RGW_ATTR_TAGS);
+ if (tags != attrs.end()){
+ return rgw_iam_add_tags_from_bl(s, tags->second, has_existing_obj_tag, has_resource_tag);
+ }
return 0;
}
+static int rgw_iam_add_objtags(const DoutPrefixProvider *dpp, struct req_state* s, bool has_existing_obj_tag, bool has_resource_tag) {
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ return rgw_iam_add_objtags(dpp, s, s->object.get(), has_existing_obj_tag, has_resource_tag);
+ }
+ return 0;
+}
-int RGWOp::verify_op_mask()
-{
- uint32_t required_mask = op_mask();
+static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, struct req_state* s, rgw::sal::Bucket* bucket) {
+ rgw::sal::Attrs attrs = bucket->get_attrs();
+ auto tags = attrs.find(RGW_ATTR_TAGS);
+ if (tags != attrs.end()) {
+ return rgw_iam_add_tags_from_bl(s, tags->second, false, true);
+ }
+ return 0;
+}
- ldout(s->cct, 20) << "required_mask= " << required_mask
- << " user.op_mask=" << s->user->op_mask << dendl;
+static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, struct req_state* s) {
+ return rgw_iam_add_buckettags(dpp, s, s->bucket.get());
+}
- if ((s->user->op_mask & required_mask) != required_mask) {
- return -EPERM;
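+// Inspect the bucket policy, identity policies and session policies for
+// conditions on s3:ExistingObjectTag/s3:ResourceTag keys. The returned
+// <has_existing_obj_tag, has_resource_tag> pair tells the caller whether
+// object tags and/or bucket tags must be loaded into the IAM environment
+// before evaluating permissions.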
+static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp,
+ boost::optional<rgw::IAM::Policy> iam_policy,
+ boost::optional<vector<rgw::IAM::Policy>> identity_policies,
+ boost::optional<vector<rgw::IAM::Policy>> session_policies,
+ bool check_obj_exist_tag=true) {
+ bool has_existing_obj_tag = false, has_resource_tag = false;
+ bool iam_policy_s3_exist_tag = false, iam_policy_s3_resource_tag = false;
+ if (iam_policy) {
+ if (check_obj_exist_tag) {
+ iam_policy_s3_exist_tag = iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG);
+ }
+ iam_policy_s3_resource_tag = iam_policy->has_partial_conditional(S3_RESOURCE_TAG) || iam_policy->has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL);
}
- if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
- ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
- return -EPERM;
+ bool identity_policy_s3_exist_tag = false, identity_policy_s3_resource_tag = false;
+ if (identity_policies) {
+ for (auto& identity_policy : identity_policies.get()) {
+ if (check_obj_exist_tag) {
+ if (identity_policy.has_partial_conditional(S3_EXISTING_OBJTAG))
+ identity_policy_s3_exist_tag = true;
+ }
+ if (identity_policy.has_partial_conditional(S3_RESOURCE_TAG) || identity_policy.has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL))
+ identity_policy_s3_resource_tag = true;
+ if (identity_policy_s3_exist_tag && identity_policy_s3_resource_tag) // check all policies till both are set to true
+ break;
+ }
}
- return 0;
+ bool session_policy_s3_exist_tag = false, session_policy_s3_resource_flag = false;
+ if (session_policies) {
+ for (auto& session_policy : session_policies.get()) {
+ if (check_obj_exist_tag) {
+ if (session_policy.has_partial_conditional(S3_EXISTING_OBJTAG))
+ session_policy_s3_exist_tag = true;
+ }
+ if (session_policy.has_partial_conditional(S3_RESOURCE_TAG) || session_policy.has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL))
+ session_policy_s3_resource_flag = true;
+ if (session_policy_s3_exist_tag && session_policy_s3_resource_flag)
+ break;
+ }
+ }
+
+ has_existing_obj_tag = iam_policy_s3_exist_tag || identity_policy_s3_exist_tag || session_policy_s3_exist_tag;
+ has_resource_tag = iam_policy_s3_resource_tag || identity_policy_s3_resource_tag || session_policy_s3_resource_flag;
+ return make_tuple(has_existing_obj_tag, has_resource_tag);
}
-int RGWOp::do_aws4_auth_completion()
-{
- int ret;
+static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp, struct req_state* s, bool check_obj_exist_tag=true) {
+ return rgw_check_policy_condition(dpp, s->iam_policy, s->iam_user_policies, s->session_policies, check_obj_exist_tag);
+}
- if (s->aws4_auth_needs_complete) {
- /* complete */
- ret = RGW_Auth_S3::authorize_aws4_auth_complete(store, s);
- s->aws4_auth_needs_complete = false;
- if (ret) {
- return ret;
- }
- /* verify signature */
- if (s->aws4_auth->signature != s->aws4_auth->new_signature) {
- ret = -ERR_SIGNATURE_NO_MATCH;
- ldout(s->cct, 20) << "delayed aws4 auth failed" << dendl;
- return ret;
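+// Surface any x-amz-grant-* ACL request headers as s3:x-amz-grant-*
+// condition keys so policies can match on the grants a request asks for.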
+static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, struct req_state *s){
+
+ using header_pair_t = std::pair <const char*, const char*>;
+ static const std::initializer_list <header_pair_t> acl_header_conditionals {
+ {"HTTP_X_AMZ_GRANT_READ", "s3:x-amz-grant-read"},
+ {"HTTP_X_AMZ_GRANT_WRITE", "s3:x-amz-grant-write"},
+ {"HTTP_X_AMZ_GRANT_READ_ACP", "s3:x-amz-grant-read-acp"},
+ {"HTTP_X_AMZ_GRANT_WRITE_ACP", "s3:x-amz-grant-write-acp"},
+ {"HTTP_X_AMZ_GRANT_FULL_CONTROL", "s3:x-amz-grant-full-control"}
+ };
+
+ if (s->has_acl_header){
+ for (const auto& c: acl_header_conditionals){
+ auto hdr = s->info.env->get(c.first);
+ if(hdr) {
+ e.emplace(c.second, hdr);
+ }
}
- /* authorization ok */
- dout(10) << "v4 auth ok" << dendl;
}
-
- return 0;
}
-int RGWOp::init_quota()
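+// Populate the request's policy-evaluation environment (s->env) with the
+// generic AWS condition keys: aws:CurrentTime/EpochTime, aws:PrincipalType,
+// aws:Referer, aws:SecureTransport, aws:SourceIp, aws:UserAgent,
+// aws:username and sts:authentication, all derived from the HTTP
+// environment and the authenticated user.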
+void rgw_build_iam_environment(rgw::sal::Store* store,
+ struct req_state* s)
{
- /* no quota enforcement for system requests */
- if (s->system_request)
- return 0;
+ const auto& m = s->info.env->get_map();
+ auto t = ceph::real_clock::now();
+ s->env.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
+ s->env.emplace("aws:EpochTime", ceph::to_iso_8601(t));
+ // TODO: This is fine for now, but once we have STS we'll need to
+ // look and see. Also this won't work with the IdentityApplier
+ // model, since we need to know the actual credential.
+ s->env.emplace("aws:PrincipalType", "User");
- /* init quota related stuff */
- if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
- return 0;
+ auto i = m.find("HTTP_REFERER");
+ if (i != m.end()) {
+ s->env.emplace("aws:Referer", i->second);
}
- /* only interested in object related ops */
- if (s->object.empty()) {
- return 0;
+ if (rgw_transport_is_secure(s->cct, *s->info.env)) {
+ s->env.emplace("aws:SecureTransport", "true");
}
- RGWUserInfo owner_info;
- RGWUserInfo *uinfo;
-
- if (s->user->user_id == s->bucket_owner.get_id()) {
- uinfo = s->user;
+ const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
+ if (remote_addr_param.length()) {
+ i = m.find(remote_addr_param);
} else {
- int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
- if (r < 0)
- return r;
- uinfo = &owner_info;
+ i = m.find("REMOTE_ADDR");
+ }
+ if (i != m.end()) {
+ const string* ip = &(i->second);
+ string temp;
+ if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
+ const auto comma = ip->find(',');
+ if (comma != string::npos) {
+ temp.assign(*ip, 0, comma);
+ ip = &temp;
+ }
+ }
+ s->env.emplace("aws:SourceIp", *ip);
}
- if (s->bucket_info.quota.enabled) {
- bucket_quota = s->bucket_info.quota;
- } else if (uinfo->bucket_quota.enabled) {
- bucket_quota = uinfo->bucket_quota;
- } else {
- bucket_quota = store->get_bucket_quota();
+ i = m.find("HTTP_USER_AGENT"); {
+ if (i != m.end())
+ s->env.emplace("aws:UserAgent", i->second);
}
- if (uinfo->user_quota.enabled) {
- user_quota = uinfo->user_quota;
- } else {
- user_quota = store->get_user_quota();
+ if (s->user) {
+ // What to do about aws::userid? One can have multiple access
+ // keys so that isn't really suitable. Do we have a durable
+ // identifier that can persist through name changes?
+ s->env.emplace("aws:username", s->user->get_id().id);
}
- return 0;
+ i = m.find("HTTP_X_AMZ_SECURITY_TOKEN");
+ if (i != m.end()) {
+ s->env.emplace("sts:authentication", "true");
+ } else {
+ s->env.emplace("sts:authentication", "false");
+ }
}
-static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
- uint8_t flags = 0;
-
- if (!req_meth) {
- dout(5) << "req_meth is null" << dendl;
- return false;
- }
+void rgw_bucket_object_pre_exec(struct req_state *s)
+{
+ if (s->expect_cont)
+ dump_continue(s);
- if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
- else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
- else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
- else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
- else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
+ dump_bucket_from_state(s);
+}
- if ((rule->get_allowed_methods() & flags) == flags) {
- dout(10) << "Method " << req_meth << " is supported" << dendl;
- } else {
- dout(5) << "Method " << req_meth << " is not supported" << dendl;
- return false;
+// So! Now and then when we try to update bucket information, the
+// bucket has changed during the course of the operation. (Or we have
+// a cache consistency problem that Watch/Notify isn't ruling out
+// completely.)
+//
+// When this happens, we need to update the bucket info and try
+// again. We have, however, to try the right *part* again. We can't
+// simply re-send, since that will obliterate the previous update.
+//
+// Thus, callers of this function should include everything that
+// merges information to be changed into the bucket information as
+// well as the call to set it.
+//
+// The called function must return an integer, negative on error. In
+// general, they should just return op_ret.
+namespace {
+template<typename F>
+int retry_raced_bucket_write(const DoutPrefixProvider *dpp, rgw::sal::Bucket* b, const F& f) {
+ auto r = f();
+ for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
+ r = b->try_refresh_info(dpp, nullptr);
+ if (r >= 0) {
+ r = f();
+ }
}
-
- return true;
+ return r;
+}
}
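+// Typical usage (sketch): wrap both the attribute merge and the store call
+// in the lambda so a retried iteration re-applies the change to the
+// refreshed bucket info, e.g.
+//
+//   op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
+//     rgw::sal::Attrs attrs = s->bucket->get_attrs();
+//     attrs[RGW_ATTR_TAGS] = tags_bl;
+//     return s->bucket->merge_and_store_attrs(this, attrs, y);
+//   });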
-int RGWOp::read_bucket_cors()
+
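+// Select the IAM action based on whether a specific version and/or torrent
+// was requested, add any referenced object/resource tags to the environment,
+// then check object permission; for object-lock enabled buckets also probe
+// the retention and legal-hold read permissions.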
+int RGWGetObj::verify_permission(optional_yield y)
{
- bufferlist bl;
+ s->object->set_atomic(s->obj_ctx);
- map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
- if (aiter == s->bucket_attrs.end()) {
- ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
- cors_exist = false;
- return 0; /* no CORS configuration found */
+ if (prefetch_data()) {
+ s->object->set_prefetch_data(s->obj_ctx);
}
- cors_exist = true;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- bl = aiter->second;
+ if (torrent.get_flag()) {
+ if (s->object->get_instance().empty()) {
+ action = rgw::IAM::s3GetObjectTorrent;
+ } else {
+ action = rgw::IAM::s3GetObjectVersionTorrent;
+ }
+ } else {
+ if (s->object->get_instance().empty()) {
+ action = rgw::IAM::s3GetObject;
+ } else {
+ action = rgw::IAM::s3GetObjectVersion;
+ }
+ }
- bufferlist::iterator iter = bl.begin();
- try {
- bucket_cors.decode(iter);
- } catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
- return -EIO;
+ if (!verify_object_permission(this, s, action)) {
+ return -EACCES;
}
- if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
- RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
- ldout(s->cct, 15) << "Read RGWCORSConfiguration";
- s3cors->to_xml(*_dout);
- *_dout << dendl;
+
+ if (s->bucket->get_info().obj_lock_enabled()) {
+ get_retention = verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention);
+ get_legal_hold = verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold);
}
+
return 0;
}
-/** CORS 6.2.6.
- * If any of the header field-names is not a ASCII case-insensitive match for
- * any of the values in list of headers do not set any additional headers and
- * terminate this set of steps.
- * */
-static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
- if (req_hdrs) {
- list<string> hl;
- get_str_list(req_hdrs, hl);
- for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
- if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
- dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
- } else {
- if (hdrs.length() > 0) hdrs.append(",");
- hdrs.append((*it));
- }
- }
- }
- rule->format_exp_headers(exp_hdrs);
- *max_age = rule->get_max_age();
-}
+RGWOp::~RGWOp(){};
-/**
- * Generate the CORS header response
- *
- * This is described in the CORS standard, section 6.2.
- */
-bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
+int RGWOp::verify_op_mask()
{
- /* CORS 6.2.1. */
- const char *orig = s->info.env->get("HTTP_ORIGIN");
- if (!orig) {
- return false;
- }
+ uint32_t required_mask = op_mask();
- /* Custom: */
- origin = orig;
- op_ret = read_bucket_cors();
- if (op_ret < 0) {
- return false;
+ ldpp_dout(this, 20) << "required_mask= " << required_mask
+ << " user.op_mask=" << s->user->get_info().op_mask << dendl;
+
+ if ((s->user->get_info().op_mask & required_mask) != required_mask) {
+ return -EPERM;
}
- if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
- return false;
+ if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->get_zone()->is_writeable()) {
+ ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
+ "non-system user, permission denied" << dendl;
+ return -EPERM;
}
- /* CORS 6.2.2. */
- RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
- if (!rule)
- return false;
-
- /*
- * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
- * and no Authorization was send by the client
- *
- * The origin parameter specifies a URI that may access the resource. The browser must enforce this.
- * For requests without credentials, the server may specify "*" as a wildcard,
- * thereby allowing any origin to access the resource.
- */
- const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
- if (!authorization && rule->has_wildcard_origin())
- origin = "*";
-
- /* CORS 6.2.3. */
- const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
- if (!req_meth) {
- req_meth = s->info.method;
- }
-
- if (req_meth) {
- method = req_meth;
- /* CORS 6.2.5. */
- if (!validate_cors_rule_method(rule, req_meth)) {
- return false;
- }
- }
-
- /* CORS 6.2.4. */
- const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
-
- /* CORS 6.2.6. */
- get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);
-
- return true;
+ return 0;
}
-int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
- const rgw_bucket_dir_entry& ent,
- RGWAccessControlPolicy * const bucket_policy,
- const off_t start_ofs,
- const off_t end_ofs)
+int RGWGetObjTags::verify_permission(optional_yield y)
{
- ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
- RGWGetObj_CB cb(this);
- RGWGetDataCB* filter = &cb;
- boost::optional<RGWGetObj_Decompress> decompress;
-
- int64_t cur_ofs = start_ofs;
- int64_t cur_end = end_ofs;
+ auto iam_action = s->object->get_instance().empty()?
+ rgw::IAM::s3GetObjectTagging:
+ rgw::IAM::s3GetObjectVersionTagging;
- rgw_obj part(bucket, ent.key);
-
- map<string, bufferlist> attrs;
-
- uint64_t obj_size;
- RGWObjectCtx obj_ctx(store);
- RGWAccessControlPolicy obj_policy(s->cct);
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+ if (!verify_object_permission(this, s,iam_action))
+ return -EACCES;
- ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;
+ return 0;
+}
- obj_ctx.obj.set_atomic(part);
- store->set_prefetch_data(&obj_ctx, part);
+void RGWGetObjTags::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
- RGWRados::Object::Read read_op(&op_target);
+void RGWGetObjTags::execute(optional_yield y)
+{
+ rgw::sal::Attrs attrs;
- read_op.conds.if_match = ent.meta.etag.c_str();
- read_op.params.attrs = &attrs;
- read_op.params.obj_size = &obj_size;
- read_op.params.perr = &s->err;
+ s->object->set_atomic(s->obj_ctx);
- op_ret = read_op.prepare();
- if (op_ret < 0)
- return op_ret;
- op_ret = read_op.range_to_ofs(obj_size, cur_ofs, cur_end);
- if (op_ret < 0)
- return op_ret;
- bool need_decompress;
- op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
+ op_ret = s->object->get_obj_attrs(s->obj_ctx, y, this);
if (op_ret < 0) {
- lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
- return -EIO;
+ ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object
+ << " ret=" << op_ret << dendl;
+ return;
}
- if (need_decompress)
- {
- if (cs_info.orig_size != ent.meta.size) {
- // hmm.. something wrong, object not as expected, abort!
- ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
- ", actual read size=" << ent.meta.size << dendl;
- return -EIO;
- }
- decompress.emplace(s->cct, &cs_info, partial_content, filter);
- filter = &*decompress;
- }
- else
- {
- if (obj_size != ent.meta.size) {
- // hmm.. something wrong, object not as expected, abort!
- ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
- return -EIO;
- }
+ attrs = s->object->get_attrs();
+ auto tags = attrs.find(RGW_ATTR_TAGS);
+ if(tags != attrs.end()){
+ has_tags = true;
+ tags_bl.append(tags->second);
}
+ send_response_data(tags_bl);
+}
+
+int RGWPutObjTags::verify_permission(optional_yield y)
+{
+ auto iam_action = s->object->get_instance().empty() ?
+ rgw::IAM::s3PutObjectTagging:
+ rgw::IAM::s3PutObjectVersionTagging;
+
+  // Using bucket tags for authorization makes more sense.
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, true);
+ if (has_s3_existing_tag)
+ rgw_iam_add_objtags(this, s, true, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+ if (!verify_object_permission(this, s,iam_action))
+ return -EACCES;
+ return 0;
+}
- op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
+void RGWPutObjTags::execute(optional_yield y)
+{
+ op_ret = get_params(y);
if (op_ret < 0)
- return op_ret;
+ return;
- /* We can use global user_acl because LOs cannot have segments
- * stored inside different accounts. */
- if (s->system_request) {
- ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
- } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
- ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
- } else if (!verify_object_permission(s, s->user_acl.get(), bucket_policy,
- &obj_policy, RGW_PERM_READ)) {
- return -EPERM;
+ if (rgw::sal::Object::empty(s->object.get())){
+ op_ret= -EINVAL; // we only support tagging on existing objects
+ return;
}
- if (ent.meta.size == 0) {
- return 0;
+ s->object->set_atomic(s->obj_ctx);
+ op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_TAGS, tags_bl, y, this);
+ if (op_ret == -ECANCELED){
+ op_ret = -ERR_TAG_CONFLICT;
}
-
- perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
- filter->fixup_range(cur_ofs, cur_end);
- op_ret = read_op.iterate(cur_ofs, cur_end, filter);
- if (op_ret >= 0)
- op_ret = filter->flush();
- return op_ret;
}
-static int iterate_user_manifest_parts(CephContext * const cct,
- RGWRados * const store,
- const off_t ofs,
- const off_t end,
- RGWBucketInfo *pbucket_info,
- const string& obj_prefix,
- RGWAccessControlPolicy * const bucket_policy,
- uint64_t * const ptotal_len,
- uint64_t * const pobj_size,
- string * const pobj_sum,
- int (*cb)(rgw_bucket& bucket,
- const rgw_bucket_dir_entry& ent,
- RGWAccessControlPolicy * const bucket_policy,
- off_t start_ofs,
- off_t end_ofs,
- void *param),
- void * const cb_param)
+void RGWDeleteObjTags::pre_exec()
{
- rgw_bucket& bucket = pbucket_info->bucket;
- uint64_t obj_ofs = 0, len_count = 0;
- bool found_start = false, found_end = false, handled_end = false;
- string delim;
- bool is_truncated;
- vector<rgw_bucket_dir_entry> objs;
-
- utime_t start_time = ceph_clock_now();
-
- RGWRados::Bucket target(store, *pbucket_info);
- RGWRados::Bucket::List list_op(&target);
-
- list_op.params.prefix = obj_prefix;
- list_op.params.delim = delim;
+ rgw_bucket_object_pre_exec(s);
+}
- MD5 etag_sum;
- do {
-#define MAX_LIST_OBJS 100
- int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
- if (r < 0) {
- return r;
- }
- for (rgw_bucket_dir_entry& ent : objs) {
- uint64_t cur_total_len = obj_ofs;
- uint64_t start_ofs = 0, end_ofs = ent.meta.size;
+int RGWDeleteObjTags::verify_permission(optional_yield y)
+{
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ auto iam_action = s->object->get_instance().empty() ?
+ rgw::IAM::s3DeleteObjectTagging:
+ rgw::IAM::s3DeleteObjectVersionTagging;
- if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
- start_ofs = ofs - obj_ofs;
- found_start = true;
- }
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+ if (!verify_object_permission(this, s, iam_action))
+ return -EACCES;
+ }
+ return 0;
+}
- obj_ofs += ent.meta.size;
- if (pobj_sum) {
- etag_sum.Update((const byte *)ent.meta.etag.c_str(),
- ent.meta.etag.length());
- }
+void RGWDeleteObjTags::execute(optional_yield y)
+{
+ if (rgw::sal::Object::empty(s->object.get()))
+ return;
- if (!found_end && obj_ofs > (uint64_t)end) {
- end_ofs = end - cur_total_len + 1;
- found_end = true;
- }
+ op_ret = s->object->delete_obj_attrs(this, s->obj_ctx, RGW_ATTR_TAGS, y);
+}
- perfcounter->tinc(l_rgw_get_lat,
- (ceph_clock_now() - start_time));
+int RGWGetBucketTags::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- if (found_start && !handled_end) {
- len_count += end_ofs - start_ofs;
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketTagging)) {
+ return -EACCES;
+ }
- if (cb) {
- r = cb(bucket, ent, bucket_policy, start_ofs, end_ofs, cb_param);
- if (r < 0) {
- return r;
- }
- }
- }
+ return 0;
+}
- handled_end = found_end;
- start_time = ceph_clock_now();
- }
- } while (is_truncated);
+void RGWGetBucketTags::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- if (ptotal_len) {
- *ptotal_len = len_count;
- }
- if (pobj_size) {
- *pobj_size = obj_ofs;
- }
- if (pobj_sum) {
- complete_etag(etag_sum, pobj_sum);
+void RGWGetBucketTags::execute(optional_yield y)
+{
+ auto iter = s->bucket_attrs.find(RGW_ATTR_TAGS);
+ if (iter != s->bucket_attrs.end()) {
+ has_tags = true;
+ tags_bl.append(iter->second);
+ } else {
+ op_ret = -ERR_NO_SUCH_TAG_SET;
}
-
- return 0;
+ send_response_data(tags_bl);
}
-struct rgw_slo_part {
- RGWAccessControlPolicy *bucket_policy;
- rgw_bucket bucket;
- string obj_name;
- uint64_t size;
- string etag;
+int RGWPutBucketTags::verify_permission(optional_yield y) {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- rgw_slo_part() : bucket_policy(NULL), size(0) {}
-};
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
+}
-static int iterate_slo_parts(CephContext *cct,
- RGWRados *store,
- off_t ofs,
- off_t end,
- map<uint64_t, rgw_slo_part>& slo_parts,
- int (*cb)(rgw_bucket& bucket,
- const rgw_bucket_dir_entry& ent,
- RGWAccessControlPolicy *bucket_policy,
- off_t start_ofs,
- off_t end_ofs,
- void *param),
- void *cb_param)
+void RGWPutBucketTags::execute(optional_yield y)
{
- bool found_start = false, found_end = false;
-
- if (slo_parts.empty()) {
- return 0;
- }
- utime_t start_time = ceph_clock_now();
+ op_ret = get_params(this, y);
+ if (op_ret < 0)
+ return;
- map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
- if (iter != slo_parts.begin()) {
- --iter;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
}
- uint64_t obj_ofs = iter->first;
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
+ rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ attrs[RGW_ATTR_TAGS] = tags_bl;
+ return s->bucket->merge_and_store_attrs(this, attrs, y);
+ });
- for (; iter != slo_parts.end() && !found_end; ++iter) {
- rgw_slo_part& part = iter->second;
- rgw_bucket_dir_entry ent;
+}
- ent.key.name = part.obj_name;
- ent.meta.size = part.size;
- ent.meta.etag = part.etag;
+void RGWDeleteBucketTags::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- uint64_t cur_total_len = obj_ofs;
- uint64_t start_ofs = 0, end_ofs = ent.meta.size;
+int RGWDeleteBucketTags::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
- start_ofs = ofs - obj_ofs;
- found_start = true;
- }
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
+}
- obj_ofs += ent.meta.size;
+void RGWDeleteBucketTags::execute(optional_yield y)
+{
+ bufferlist in_data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
- if (!found_end && obj_ofs > (uint64_t)end) {
- end_ofs = end - cur_total_len + 1;
- found_end = true;
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
+ rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ attrs.erase(RGW_ATTR_TAGS);
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
+ << s->bucket->get_name()
+ << " returned err= " << op_ret << dendl;
}
+ return op_ret;
+ });
+}
- perfcounter->tinc(l_rgw_get_lat,
- (ceph_clock_now() - start_time));
-
- if (found_start) {
- if (cb) {
- int r = cb(part.bucket, ent, part.bucket_policy, start_ofs, end_ofs, cb_param);
- if (r < 0)
- return r;
- }
- }
+int RGWGetBucketReplication::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- start_time = ceph_clock_now();
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetReplicationConfiguration)) {
+ return -EACCES;
}
return 0;
}
-static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
- const rgw_bucket_dir_entry& ent,
- RGWAccessControlPolicy * const bucket_policy,
- const off_t start_ofs,
- const off_t end_ofs,
- void * const param)
+void RGWGetBucketReplication::pre_exec()
{
- RGWGetObj *op = static_cast<RGWGetObj *>(param);
- return op->read_user_manifest_part(bucket, ent, bucket_policy, start_ofs, end_ofs);
+ rgw_bucket_object_pre_exec(s);
}
-int RGWGetObj::handle_user_manifest(const char *prefix)
+void RGWGetBucketReplication::execute(optional_yield y)
{
- ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix=" << prefix << dendl;
+ send_response_data();
+}
- string prefix_str = prefix;
- size_t pos = prefix_str.find('/');
- if (pos == string::npos)
- return -EINVAL;
+int RGWPutBucketReplication::verify_permission(optional_yield y) {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutReplicationConfiguration);
+}
- string bucket_name_raw, bucket_name;
- bucket_name_raw = prefix_str.substr(0, pos);
- url_decode(bucket_name_raw, bucket_name);
+void RGWPutBucketReplication::execute(optional_yield y) {
- string obj_prefix_raw, obj_prefix;
- obj_prefix_raw = prefix_str.substr(pos + 1);
- url_decode(obj_prefix_raw, obj_prefix);
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
- rgw_bucket bucket;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
- RGWAccessControlPolicy _bucket_policy(s->cct);
- RGWAccessControlPolicy *bucket_policy;
- RGWBucketInfo bucket_info;
- RGWBucketInfo *pbucket_info;
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ auto sync_policy = (s->bucket->get_info().sync_policy ? *s->bucket->get_info().sync_policy : rgw_sync_policy_info());
- if (bucket_name.compare(s->bucket.name) != 0) {
- map<string, bufferlist> bucket_attrs;
- RGWObjectCtx obj_ctx(store);
- int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
- bucket_name, bucket_info, NULL,
- &bucket_attrs);
- if (r < 0) {
- ldout(s->cct, 0) << "could not get bucket info for bucket="
- << bucket_name << dendl;
- return r;
+ for (auto& group : sync_policy_groups) {
+ sync_policy.groups[group.id] = group;
}
- bucket = bucket_info.bucket;
- pbucket_info = &bucket_info;
- bucket_policy = &_bucket_policy;
- r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_policy, bucket);
- if (r < 0) {
- ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
- return r;
- }
- } else {
- bucket = s->bucket;
- pbucket_info = &s->bucket_info;
- bucket_policy = s->bucket_acl;
- }
- /* dry run to find out:
- * - total length (of the parts we are going to send to client),
- * - overall DLO's content size,
- * - md5 sum of overall DLO's content (for etag of Swift API). */
- int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
- pbucket_info, obj_prefix, bucket_policy,
- &total_len, &s->obj_size, &lo_etag,
- nullptr /* cb */, nullptr /* cb arg */);
- if (r < 0) {
- return r;
- }
+ s->bucket->get_info().set_sync_policy(std::move(sync_policy));
+
+ int ret = s->bucket->put_info(this, false, real_time());
+ if (ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket << ") returned ret=" << ret << dendl;
+ return ret;
+ }
- if (!get_data) {
- bufferlist bl;
- send_response_data(bl, 0, 0);
return 0;
- }
+ });
+}
- r = iterate_user_manifest_parts(s->cct, store, ofs, end,
- pbucket_info, obj_prefix, bucket_policy,
- nullptr, nullptr, nullptr,
- get_obj_user_manifest_iterate_cb, (void *)this);
- if (r < 0) {
- return r;
- }
+void RGWDeleteBucketReplication::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- if (!total_len) {
- bufferlist bl;
- send_response_data(bl, 0, 0);
- }
+int RGWDeleteBucketReplication::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- return 0;
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteReplicationConfiguration);
}
-int RGWGetObj::handle_slo_manifest(bufferlist& bl)
+void RGWDeleteBucketReplication::execute(optional_yield y)
{
- RGWSLOInfo slo_info;
- bufferlist::iterator bliter = bl.begin();
- try {
- ::decode(slo_info, bliter);
- } catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
- return -EIO;
+ bufferlist in_data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
}
- ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
-
- list<RGWAccessControlPolicy> allocated_policies;
- map<string, RGWAccessControlPolicy *> policies;
- map<string, rgw_bucket> buckets;
- map<uint64_t, rgw_slo_part> slo_parts;
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ if (!s->bucket->get_info().sync_policy) {
+ return 0;
+ }
- MD5 etag_sum;
- total_len = 0;
+ rgw_sync_policy_info sync_policy = *s->bucket->get_info().sync_policy;
- for (const auto& entry : slo_info.entries) {
- const string& path = entry.path;
+ update_sync_policy(&sync_policy);
- /* If the path starts with slashes, strip them all. */
- const size_t pos_init = path.find_first_not_of('/');
- /* According to the documentation of std::string::find following check
- * is not necessary as we should get the std::string::npos propagation
- * here. This might be true with the accuracy to implementation's bugs.
- * See following question on SO:
- * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
- */
- if (pos_init == string::npos) {
- return -EINVAL;
- }
+ s->bucket->get_info().set_sync_policy(std::move(sync_policy));
- const size_t pos_sep = path.find('/', pos_init);
- if (pos_sep == string::npos) {
- return -EINVAL;
+ int ret = s->bucket->put_info(this, false, real_time());
+ if (ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket << ") returned ret=" << ret << dendl;
+ return ret;
}
- string bucket_name = path.substr(pos_init, pos_sep - pos_init);
- string obj_name = path.substr(pos_sep + 1);
-
- rgw_bucket bucket;
- RGWAccessControlPolicy *bucket_policy;
+ return 0;
+ });
+}
- if (bucket_name.compare(s->bucket.name) != 0) {
- const auto& piter = policies.find(bucket_name);
- if (piter != policies.end()) {
- bucket_policy = piter->second;
- bucket = buckets[bucket_name];
- } else {
- allocated_policies.push_back(RGWAccessControlPolicy(s->cct));
- RGWAccessControlPolicy& _bucket_policy = allocated_policies.back();
-
- RGWBucketInfo bucket_info;
- map<string, bufferlist> bucket_attrs;
- RGWObjectCtx obj_ctx(store);
- int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
- bucket_name, bucket_info, nullptr,
- &bucket_attrs);
- if (r < 0) {
- ldout(s->cct, 0) << "could not get bucket info for bucket="
- << bucket_name << dendl;
- return r;
- }
- bucket = bucket_info.bucket;
- bucket_policy = &_bucket_policy;
- r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_policy,
- bucket);
- if (r < 0) {
- ldout(s->cct, 0) << "failed to read bucket policy for bucket "
- << bucket << dendl;
- return r;
- }
- buckets[bucket_name] = bucket;
- policies[bucket_name] = bucket_policy;
- }
+int RGWOp::do_aws4_auth_completion()
+{
+ ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
+ if (s->auth.completer) {
+ if (!s->auth.completer->complete()) {
+ return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
} else {
- bucket = s->bucket;
- bucket_policy = s->bucket_acl;
+ ldpp_dout(this, 10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
}
- rgw_slo_part part;
- part.bucket_policy = bucket_policy;
- part.bucket = bucket;
- part.obj_name = obj_name;
- part.size = entry.size_bytes;
- part.etag = entry.etag;
- ldout(s->cct, 20) << "slo_part: ofs=" << ofs
- << " bucket=" << part.bucket
- << " obj=" << part.obj_name
- << " size=" << part.size
- << " etag=" << part.etag
- << dendl;
+ /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
+ * call passes, so we disable second one. This is old behaviour, sorry!
+ * Plan for tomorrow: seek and destroy. */
+ s->auth.completer = nullptr;
+ }
- etag_sum.Update((const byte *)entry.etag.c_str(),
- entry.etag.length());
+ return 0;
+}
- slo_parts[total_len] = part;
- total_len += part.size;
+int RGWOp::init_quota()
+{
+ /* no quota enforcement for system requests */
+ if (s->system_request)
+ return 0;
+
+ /* init quota related stuff */
+ if (!(s->user->get_info().op_mask & RGW_OP_TYPE_MODIFY)) {
+ return 0;
}
- complete_etag(etag_sum, &lo_etag);
+ /* only interested in object related ops */
+ if (rgw::sal::Bucket::empty(s->bucket.get())
+ || rgw::sal::Object::empty(s->object.get())) {
+ return 0;
+ }
- s->obj_size = slo_info.total_size;
- ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
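+ // quota settings come from the bucket owner's account; load the owner unless the requester already is the owner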
+ std::unique_ptr<rgw::sal::User> owner_user =
+ store->get_user(s->bucket->get_info().owner);
+ rgw::sal::User* user;
- if (ofs < 0) {
- ofs = total_len - std::min(-ofs, static_cast<off_t>(total_len));
+ if (s->user->get_id() == s->bucket_owner.get_id()) {
+ user = s->user.get();
+ } else {
+ int r = owner_user->load_user(this, s->yield);
+ if (r < 0)
+ return r;
+ user = owner_user.get();
}
- if (end < 0 || end >= static_cast<off_t>(total_len)) {
- end = total_len - 1;
- }
+ store->get_quota(bucket_quota, user_quota);
- total_len = end - ofs + 1;
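+ // an explicitly enabled per-bucket quota takes precedence over the owner's default bucket quota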
+ if (s->bucket->get_info().quota.enabled) {
+ bucket_quota = s->bucket->get_info().quota;
+ } else if (user->get_info().bucket_quota.enabled) {
+ bucket_quota = user->get_info().bucket_quota;
+ }
- int r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
- get_obj_user_manifest_iterate_cb, (void *)this);
- if (r < 0) {
- return r;
+ if (user->get_info().user_quota.enabled) {
+ user_quota = user->get_info().user_quota;
}
return 0;
}
-int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
-{
- /* garbage collection related handling */
- utime_t start_time = ceph_clock_now();
- if (start_time > gc_invalidate_time) {
- int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
- if (r < 0) {
- dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
- }
- gc_invalidate_time = start_time;
- gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
- }
- return send_response_data(bl, bl_ofs, bl_len);
-}
+static bool validate_cors_rule_method(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_meth) {
+ uint8_t flags = 0;
-bool RGWGetObj::prefetch_data()
-{
- /* HEAD request, stop prefetch*/
- if (!get_data) {
+ if (!req_meth) {
+ ldpp_dout(dpp, 5) << "req_meth is null" << dendl;
return false;
}
- bool prefetch_first_chunk = true;
- range_str = s->info.env->get("HTTP_RANGE");
+ if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
+ else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
+ else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
+ else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
+ else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
- if(range_str) {
- int r = parse_range(range_str, ofs, end, &partial_content);
- /* error on parsing the range, stop prefetch and will fail in execte() */
- if (r < 0) {
- range_parsed = false;
- return false;
- } else {
- range_parsed = true;
- }
- /* range get goes to shadown objects, stop prefetch */
- if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
- prefetch_first_chunk = false;
- }
+ if (rule->get_allowed_methods() & flags) {
+ ldpp_dout(dpp, 10) << "Method " << req_meth << " is supported" << dendl;
+ } else {
+ ldpp_dout(dpp, 5) << "Method " << req_meth << " is not supported" << dendl;
+ return false;
}
- return get_data && prefetch_first_chunk;
-}
-void RGWGetObj::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
+ return true;
}
-static bool object_is_expired(map<string, bufferlist>& attrs) {
- map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
- if (iter != attrs.end()) {
- utime_t delete_at;
- try {
- ::decode(delete_at, iter->second);
- } catch (buffer::error& err) {
- dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
- return false;
- }
-
- if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
- return true;
+static bool validate_cors_rule_header(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_hdrs) {
+ if (req_hdrs) {
+ vector<string> hdrs;
+ get_str_vec(req_hdrs, hdrs);
+ for (const auto& hdr : hdrs) {
+ if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
+ ldpp_dout(dpp, 5) << "Header " << hdr << " is not registered in this rule" << dendl;
+ return false;
+ }
}
}
-
- return false;
+ return true;
}
-void RGWGetObj::execute()
+int RGWOp::read_bucket_cors()
{
- utime_t start_time = s->time;
bufferlist bl;
- gc_invalidate_time = ceph_clock_now();
- gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
-
- bool need_decompress;
- int64_t ofs_x, end_x;
-
- RGWGetObj_CB cb(this);
- RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
- boost::optional<RGWGetObj_Decompress> decompress;
- std::unique_ptr<RGWGetDataCB> decrypt;
- map<string, bufferlist>::iterator attr_iter;
-
- perfcounter->inc(l_rgw_get);
-
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
- RGWRados::Object::Read read_op(&op_target);
- op_ret = get_params();
- if (op_ret < 0)
- goto done_err;
+ map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
+ if (aiter == s->bucket_attrs.end()) {
+ ldpp_dout(this, 20) << "no CORS configuration attr found" << dendl;
+ cors_exist = false;
+ return 0; /* no CORS configuration found */
+ }
- op_ret = init_common();
- if (op_ret < 0)
- goto done_err;
+ cors_exist = true;
- read_op.conds.mod_ptr = mod_ptr;
- read_op.conds.unmod_ptr = unmod_ptr;
- read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
- read_op.conds.mod_zone_id = mod_zone_id;
- read_op.conds.mod_pg_ver = mod_pg_ver;
- read_op.conds.if_match = if_match;
- read_op.conds.if_nomatch = if_nomatch;
- read_op.params.attrs = &attrs;
- read_op.params.lastmod = &lastmod;
- read_op.params.obj_size = &s->obj_size;
- read_op.params.perr = &s->err;
-
- op_ret = read_op.prepare();
- if (op_ret < 0)
- goto done_err;
+ bl = aiter->second;
- /* STAT ops don't need data, and do no i/o */
- if (get_type() == RGW_OP_STAT_OBJ) {
- return;
+ auto iter = bl.cbegin();
+ try {
+ bucket_cors.decode(iter);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: could not decode CORS, caught buffer::error" << dendl;
+ return -EIO;
}
+ if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
+ RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
+ ldpp_dout(this, 15) << "Read RGWCORSConfiguration";
+ s3cors->to_xml(*_dout);
+ *_dout << dendl;
+ }
+ return 0;
+}
- /* start gettorrent */
- if (torrent.get_flag())
- {
- torrent.init(s, store);
- torrent.get_torrent_file(op_ret, read_op, total_len, bl, obj);
- if (op_ret < 0)
- {
- ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
- << dendl;
- goto done_err;
- }
- op_ret = send_response_data(bl, 0, total_len);
- if (op_ret < 0)
- {
- ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
- << dendl;
- goto done_err;
+/** CORS 6.2.6.
+ * If any of the header field-names is not an ASCII case-insensitive match for
+ * any of the values in the list of headers, do not set any additional headers
+ * and terminate this set of steps.
+ */
+static void get_cors_response_headers(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
+ if (req_hdrs) {
+ list<string> hl;
+ get_str_list(req_hdrs, hl);
+ for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
+ if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
+ ldpp_dout(dpp, 5) << "Header " << (*it) << " is not registered in this rule" << dendl;
+ } else {
+ if (hdrs.length() > 0) hdrs.append(",");
+ hdrs.append((*it));
+ }
}
- return;
}
- /* end gettorrent */
+ rule->format_exp_headers(exp_hdrs);
+ *max_age = rule->get_max_age();
+}
- op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
- if (op_ret < 0) {
- lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
- goto done_err;
+/**
+ * Generate the CORS header response
+ *
+ * This is described in the CORS standard, section 6.2.
+ */
+bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
+{
+ /* CORS 6.2.1. */
+ const char *orig = s->info.env->get("HTTP_ORIGIN");
+ if (!orig) {
+ return false;
}
- if (need_decompress) {
- s->obj_size = cs_info.orig_size;
- decompress.emplace(s->cct, &cs_info, partial_content, filter);
+
+ /* Custom: */
+ origin = orig;
+ int temp_op_ret = read_bucket_cors();
+ if (temp_op_ret < 0) {
+ op_ret = temp_op_ret;
+ return false;
+ }
+
+ if (!cors_exist) {
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
+ return false;
+ }
+
+ /* CORS 6.2.2. */
+ RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
+ if (!rule)
+ return false;
+
+ /*
+ * Set the Allowed-Origin header to an asterisk if this is allowed in the rule
+ * and no Authorization header was sent by the client.
+ *
+ * The origin parameter specifies a URI that may access the resource. The browser must enforce this.
+ * For requests without credentials, the server may specify "*" as a wildcard,
+ * thereby allowing any origin to access the resource.
+ */
+ const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
+ if (!authorization && rule->has_wildcard_origin())
+ origin = "*";
+
+ /* CORS 6.2.3. */
+ const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
+ if (!req_meth) {
+ req_meth = s->info.method;
+ }
+
+ if (req_meth) {
+ method = req_meth;
+ /* CORS 6.2.5. */
+ if (!validate_cors_rule_method(this, rule, req_meth)) {
+ return false;
+ }
+ }
+
+ /* CORS 6.2.4. */
+ const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
+
+ /* CORS 6.2.6. */
+ get_cors_response_headers(this, rule, req_hdrs, headers, exp_headers, max_age);
+
+ return true;
+}
+
+int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket,
+ const rgw_bucket_dir_entry& ent,
+ RGWAccessControlPolicy * const bucket_acl,
+ const boost::optional<Policy>& bucket_policy,
+ const off_t start_ofs,
+ const off_t end_ofs,
+ bool swift_slo)
+{
+ ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name
+ << "[" << ent.key.instance << "]" << dendl;
+ RGWGetObj_CB cb(this);
+ RGWGetObj_Filter* filter = &cb;
+ boost::optional<RGWGetObj_Decompress> decompress;
+
+ int64_t cur_ofs = start_ofs;
+ int64_t cur_end = end_ofs;
+
+ std::unique_ptr<rgw::sal::Object> part = bucket->get_object(ent.key);
+
+ RGWObjectCtx obj_ctx(store);
+ RGWAccessControlPolicy obj_policy(s->cct);
+
+ ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs
+ << " end=" << cur_end << dendl;
+
+ part->set_atomic(&obj_ctx);
+ part->set_prefetch_data(&obj_ctx);
+
+ std::unique_ptr<rgw::sal::Object::ReadOp> read_op = part->get_read_op(&obj_ctx);
+
+ if (!swift_slo) {
+ /* for DLO parts, require the etag recorded in the listing to still match;
+ * SLO manifest etags are optional, so the check is skipped for SLO */
+ read_op->params.if_match = ent.meta.etag.c_str();
+ }
+
+ op_ret = read_op->prepare(s->yield, this);
+ if (op_ret < 0)
+ return op_ret;
+ op_ret = part->range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
+ if (op_ret < 0)
+ return op_ret;
+ bool need_decompress;
+ op_ret = rgw_compression_info_from_attrset(part->get_attrs(), need_decompress, cs_info);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl;
+ return -EIO;
+ }
+
+ if (need_decompress)
+ {
+ if (cs_info.orig_size != ent.meta.accounted_size) {
+ // hmm.. something wrong, object not as expected, abort!
+ ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size
+ << ", actual read size=" << ent.meta.size << dendl;
+ return -EIO;
+ }
+ decompress.emplace(s->cct, &cs_info, partial_content, filter);
+ filter = &*decompress;
+ }
+ else
+ {
+ if (part->get_obj_size() != ent.meta.size) {
+ // hmm.. something wrong, object not as expected, abort!
+ ldpp_dout(this, 0) << "ERROR: expected obj_size=" << part->get_obj_size()
+ << ", actual read size=" << ent.meta.size << dendl;
+ return -EIO;
+ }
+ }
+
+ op_ret = rgw_policy_from_attrset(s, s->cct, part->get_attrs(), &obj_policy);
+ if (op_ret < 0)
+ return op_ret;
+
+ /* We can use global user_acl because LOs cannot have segments
+ * stored inside different accounts. */
+ if (s->system_request) {
+ ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl;
+ } else if (s->auth.identity->is_admin_of(s->user->get_id())) {
+ ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl;
+ } else if (!verify_object_permission(this, s, part->get_obj(), s->user_acl.get(),
+ bucket_acl, &obj_policy, bucket_policy,
+ s->iam_user_policies, s->session_policies, action)) {
+ return -EPERM;
+ }
+ if (ent.meta.size == 0) {
+ return 0;
+ }
+
+ perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
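+ // let the filter chain (e.g. decompression) adjust the requested range to what actually has to be read from the store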
+ filter->fixup_range(cur_ofs, cur_end);
+ op_ret = read_op->iterate(this, cur_ofs, cur_end, filter, s->yield);
+ if (op_ret >= 0)
+ op_ret = filter->flush();
+ return op_ret;
+}
+
+static int iterate_user_manifest_parts(const DoutPrefixProvider *dpp,
+ CephContext * const cct,
+ rgw::sal::Store* const store,
+ const off_t ofs,
+ const off_t end,
+ rgw::sal::Bucket* bucket,
+ const string& obj_prefix,
+ RGWAccessControlPolicy * const bucket_acl,
+ const boost::optional<Policy>& bucket_policy,
+ uint64_t * const ptotal_len,
+ uint64_t * const pobj_size,
+ string * const pobj_sum,
+ int (*cb)(rgw::sal::Bucket* bucket,
+ const rgw_bucket_dir_entry& ent,
+ RGWAccessControlPolicy * const bucket_acl,
+ const boost::optional<Policy>& bucket_policy,
+ off_t start_ofs,
+ off_t end_ofs,
+ void *param,
+ bool swift_slo),
+ void * const cb_param,
+ optional_yield y)
+{
+ uint64_t obj_ofs = 0, len_count = 0;
+ bool found_start = false, found_end = false, handled_end = false;
+ string delim;
+
+ utime_t start_time = ceph_clock_now();
+
+ rgw::sal::Bucket::ListParams params;
+ params.prefix = obj_prefix;
+ params.delim = delim;
+
+ rgw::sal::Bucket::ListResults results;
+ MD5 etag_sum;
+ // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+ etag_sum.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
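+ // walk the listed parts in order, tracking the cumulative offset so the requested byte range [ofs, end] can be mapped onto per-part [start_ofs, end_ofs) windows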
+ do {
+ static constexpr auto MAX_LIST_OBJS = 100u;
+ int r = bucket->list(dpp, params, MAX_LIST_OBJS, results, y);
+ if (r < 0) {
+ return r;
+ }
+
+ for (rgw_bucket_dir_entry& ent : results.objs) {
+ const uint64_t cur_total_len = obj_ofs;
+ const uint64_t obj_size = ent.meta.accounted_size;
+ uint64_t start_ofs = 0, end_ofs = obj_size;
+
+ if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
+ start_ofs = ofs - obj_ofs;
+ found_start = true;
+ }
+
+ obj_ofs += obj_size;
+ if (pobj_sum) {
+ etag_sum.Update((const unsigned char *)ent.meta.etag.c_str(),
+ ent.meta.etag.length());
+ }
+
+ if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
+ end_ofs = end - cur_total_len + 1;
+ found_end = true;
+ }
+
+ perfcounter->tinc(l_rgw_get_lat,
+ (ceph_clock_now() - start_time));
+
+ if (found_start && !handled_end) {
+ len_count += end_ofs - start_ofs;
+
+ if (cb) {
+ r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs,
+ cb_param, false /* swift_slo */);
+ if (r < 0) {
+ return r;
+ }
+ }
+ }
+
+ handled_end = found_end;
+ start_time = ceph_clock_now();
+ }
+ } while (results.is_truncated);
+
+ if (ptotal_len) {
+ *ptotal_len = len_count;
+ }
+ if (pobj_size) {
+ *pobj_size = obj_ofs;
+ }
+ if (pobj_sum) {
+ complete_etag(etag_sum, pobj_sum);
+ }
+
+ return 0;
+}
+
+struct rgw_slo_part {
+ RGWAccessControlPolicy *bucket_acl = nullptr;
+ Policy* bucket_policy = nullptr;
+ rgw::sal::Bucket* bucket;
+ string obj_name;
+ uint64_t size = 0;
+ string etag;
+};
+
+static int iterate_slo_parts(const DoutPrefixProvider *dpp,
+ CephContext *cct,
+ rgw::sal::Store* store,
+ off_t ofs,
+ off_t end,
+ map<uint64_t, rgw_slo_part>& slo_parts,
+ int (*cb)(rgw::sal::Bucket* bucket,
+ const rgw_bucket_dir_entry& ent,
+ RGWAccessControlPolicy *bucket_acl,
+ const boost::optional<Policy>& bucket_policy,
+ off_t start_ofs,
+ off_t end_ofs,
+ void *param,
+ bool swift_slo),
+ void *cb_param)
+{
+ bool found_start = false, found_end = false;
+
+ if (slo_parts.empty()) {
+ return 0;
+ }
+
+ utime_t start_time = ceph_clock_now();
+
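+ // slo_parts is keyed by each part's cumulative starting offset, so upper_bound(ofs) minus one locates the part that contains ofs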
+ map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
+ if (iter != slo_parts.begin()) {
+ --iter;
+ }
+
+ uint64_t obj_ofs = iter->first;
+
+ for (; iter != slo_parts.end() && !found_end; ++iter) {
+ rgw_slo_part& part = iter->second;
+ rgw_bucket_dir_entry ent;
+
+ ent.key.name = part.obj_name;
+ ent.meta.accounted_size = ent.meta.size = part.size;
+ ent.meta.etag = part.etag;
+
+ uint64_t cur_total_len = obj_ofs;
+ uint64_t start_ofs = 0, end_ofs = ent.meta.size - 1;
+
+ if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
+ start_ofs = ofs - obj_ofs;
+ found_start = true;
+ }
+
+ obj_ofs += ent.meta.size;
+
+ if (!found_end && obj_ofs > (uint64_t)end) {
+ end_ofs = end - cur_total_len;
+ found_end = true;
+ }
+
+ perfcounter->tinc(l_rgw_get_lat,
+ (ceph_clock_now() - start_time));
+
+ if (found_start) {
+ if (cb) {
+ ldpp_dout(dpp, 20) << "iterate_slo_parts()"
+ << " obj=" << part.obj_name
+ << " start_ofs=" << start_ofs
+ << " end_ofs=" << end_ofs
+ << dendl;
+
+ // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
+ int r = cb(part.bucket, ent, part.bucket_acl,
+ (part.bucket_policy ?
+ boost::optional<Policy>(*part.bucket_policy) : none),
+ start_ofs, end_ofs, cb_param, true /* swift_slo */);
+ if (r < 0)
+ return r;
+ }
+ }
+
+ start_time = ceph_clock_now();
+ }
+
+ return 0;
+}
+
+static int get_obj_user_manifest_iterate_cb(rgw::sal::Bucket* bucket,
+ const rgw_bucket_dir_entry& ent,
+ RGWAccessControlPolicy * const bucket_acl,
+ const boost::optional<Policy>& bucket_policy,
+ const off_t start_ofs,
+ const off_t end_ofs,
+ void * const param,
+ bool swift_slo = false)
+{
+ RGWGetObj *op = static_cast<RGWGetObj *>(param);
+ return op->read_user_manifest_part(
+ bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, swift_slo);
+}
+
+int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y)
+{
+ const std::string_view prefix_view(prefix);
+ ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix="
+ << prefix_view << dendl;
+
+ const size_t pos = prefix_view.find('/');
+ if (pos == string::npos) {
+ return -EINVAL;
+ }
+
+ const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
+ const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));
+
+ RGWAccessControlPolicy _bucket_acl(s->cct);
+ RGWAccessControlPolicy *bucket_acl;
+ boost::optional<Policy> _bucket_policy;
+ boost::optional<Policy>* bucket_policy;
+ RGWBucketInfo bucket_info;
+ std::unique_ptr<rgw::sal::Bucket> ubucket;
+ rgw::sal::Bucket* pbucket = NULL;
+ int r = 0;
+
+ if (bucket_name.compare(s->bucket->get_name()) != 0) {
+ map<string, bufferlist> bucket_attrs;
+ r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y);
+ if (r < 0) {
+ ldpp_dout(this, 0) << "could not get bucket info for bucket="
+ << bucket_name << dendl;
+ return r;
+ }
+ bucket_acl = &_bucket_acl;
+ r = read_bucket_policy(this, store, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y);
+ if (r < 0) {
+ ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
+ return r;
+ }
+ _bucket_policy = get_iam_policy_from_attr(s->cct, bucket_attrs, s->user->get_tenant());
+ bucket_policy = &_bucket_policy;
+ pbucket = ubucket.get();
+ } else {
+ pbucket = s->bucket.get();
+ bucket_acl = s->bucket_acl.get();
+ bucket_policy = &s->iam_policy;
+ }
+
+ /* dry run to find out:
+ * - total length (of the parts we are going to send to client),
+ * - overall DLO's content size,
+ * - md5 sum of overall DLO's content (for etag of Swift API). */
+ r = iterate_user_manifest_parts(this, s->cct, store, ofs, end,
+ pbucket, obj_prefix, bucket_acl, *bucket_policy,
+ nullptr, &s->obj_size, &lo_etag,
+ nullptr /* cb */, nullptr /* cb arg */, y);
+ if (r < 0) {
+ return r;
+ }
+ s->object->set_obj_size(s->obj_size);
+
+ r = s->object->range_to_ofs(s->obj_size, ofs, end);
+ if (r < 0) {
+ return r;
+ }
+
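+ // second pass: with ofs/end now clamped to the object size, compute the total length that will actually be sent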
+ r = iterate_user_manifest_parts(this, s->cct, store, ofs, end,
+ pbucket, obj_prefix, bucket_acl, *bucket_policy,
+ &total_len, nullptr, nullptr,
+ nullptr, nullptr, y);
+ if (r < 0) {
+ return r;
+ }
+
+ if (!get_data) {
+ bufferlist bl;
+ send_response_data(bl, 0, 0);
+ return 0;
+ }
+
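+ // third pass: stream the parts that overlap the requested range back to the client via the callback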
+ r = iterate_user_manifest_parts(this, s->cct, store, ofs, end,
+ pbucket, obj_prefix, bucket_acl, *bucket_policy,
+ nullptr, nullptr, nullptr,
+ get_obj_user_manifest_iterate_cb, (void *)this, y);
+ if (r < 0) {
+ return r;
+ }
+
+ if (!total_len) {
+ bufferlist bl;
+ send_response_data(bl, 0, 0);
+ }
+
+ return r;
+}
+
+int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
+{
+ RGWSLOInfo slo_info;
+ auto bliter = bl.cbegin();
+ try {
+ decode(slo_info, bliter);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
+ return -EIO;
+ }
+ ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
+
+ vector<RGWAccessControlPolicy> allocated_acls;
+ map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies;
+ map<string, std::unique_ptr<rgw::sal::Bucket>> buckets;
+
+ map<uint64_t, rgw_slo_part> slo_parts;
+
+ MD5 etag_sum;
+ // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+ etag_sum.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+ total_len = 0;
+
+ for (const auto& entry : slo_info.entries) {
+ const string& path = entry.path;
+
+ /* If the path starts with slashes, strip them all. */
+ const size_t pos_init = path.find_first_not_of('/');
+ /* According to the documentation of std::string::find the following check
+ * is not necessary, as we should get std::string::npos propagation
+ * here. This should hold, barring implementation bugs.
+ * See the following question on SO:
+ * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
+ */
+ if (pos_init == string::npos) {
+ return -EINVAL;
+ }
+
+ const size_t pos_sep = path.find('/', pos_init);
+ if (pos_sep == string::npos) {
+ return -EINVAL;
+ }
+
+ string bucket_name = path.substr(pos_init, pos_sep - pos_init);
+ string obj_name = path.substr(pos_sep + 1);
+
+ rgw::sal::Bucket* bucket;
+ RGWAccessControlPolicy *bucket_acl;
+ Policy* bucket_policy;
+
+ if (bucket_name.compare(s->bucket->get_name()) != 0) {
+ const auto& piter = policies.find(bucket_name);
+ if (piter != policies.end()) {
+ bucket_acl = piter->second.first;
+ bucket_policy = piter->second.second.get_ptr();
+ bucket = buckets[bucket_name].get();
+ } else {
+ allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
+ RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
+
+ std::unique_ptr<rgw::sal::Bucket> tmp_bucket;
+ int r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y);
+ if (r < 0) {
+ ldpp_dout(this, 0) << "could not get bucket info for bucket="
+ << bucket_name << dendl;
+ return r;
+ }
+ bucket = tmp_bucket.get();
+ bucket_acl = &_bucket_acl;
+ r = read_bucket_policy(this, store, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl,
+ tmp_bucket->get_key(), y);
+ if (r < 0) {
+ ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
+ << bucket << dendl;
+ return r;
+ }
+ auto _bucket_policy = get_iam_policy_from_attr(
+ s->cct, tmp_bucket->get_attrs(), tmp_bucket->get_tenant());
+ bucket_policy = _bucket_policy.get_ptr();
+ buckets[bucket_name].swap(tmp_bucket);
+ policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
+ }
+ } else {
+ bucket = s->bucket.get();
+ bucket_acl = s->bucket_acl.get();
+ bucket_policy = s->iam_policy.get_ptr();
+ }
+
+ rgw_slo_part part;
+ part.bucket_acl = bucket_acl;
+ part.bucket_policy = bucket_policy;
+ part.bucket = bucket;
+ part.obj_name = obj_name;
+ part.size = entry.size_bytes;
+ part.etag = entry.etag;
+ ldpp_dout(this, 20) << "slo_part: bucket=" << part.bucket
+ << " obj=" << part.obj_name
+ << " size=" << part.size
+ << " etag=" << part.etag
+ << dendl;
+
+ etag_sum.Update((const unsigned char *)entry.etag.c_str(),
+ entry.etag.length());
+
+ slo_parts[total_len] = part;
+ total_len += part.size;
+ } /* foreach entry */
+
+ complete_etag(etag_sum, &lo_etag);
+
+ s->obj_size = slo_info.total_size;
+ s->object->set_obj_size(slo_info.total_size);
+ ldpp_dout(this, 20) << "s->obj_size=" << s->obj_size << dendl;
+
+ int r = s->object->range_to_ofs(total_len, ofs, end);
+ if (r < 0) {
+ return r;
+ }
+
+ total_len = end - ofs + 1;
+ ldpp_dout(this, 20) << "Requested: ofs=" << ofs
+ << " end=" << end
+ << " total=" << total_len
+ << dendl;
+
+ r = iterate_slo_parts(this, s->cct, store, ofs, end, slo_parts,
+ get_obj_user_manifest_iterate_cb, (void *)this);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
+{
+ /* garbage collection related handling:
+ * defer_gc disabled for https://tracker.ceph.com/issues/47866 */
+ return send_response_data(bl, bl_ofs, bl_len);
+}
+
+bool RGWGetObj::prefetch_data()
+{
+ /* HEAD request, stop prefetch */
+ if (!get_data || s->info.env->exists("HTTP_X_RGW_AUTH")) {
+ return false;
+ }
+
+ range_str = s->info.env->get("HTTP_RANGE");
+ // TODO: add range prefetch
+ if (range_str) {
+ parse_range();
+ return false;
+ }
+
+ return get_data;
+}
+
+void RGWGetObj::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+static inline void rgw_cond_decode_objtags(
+ struct req_state *s,
+ const std::map<std::string, buffer::list> &attrs)
+{
+ const auto& tags = attrs.find(RGW_ATTR_TAGS);
+ if (tags != attrs.end()) {
+ try {
+ bufferlist::const_iterator iter{&tags->second};
+ s->tagset.decode(iter);
+ } catch (buffer::error& err) {
+ ldpp_dout(s, 0)
+ << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
+ }
+ }
+}
+
+void RGWGetObj::execute(optional_yield y)
+{
+ bufferlist bl;
+ gc_invalidate_time = ceph_clock_now();
+ gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
+
+ bool need_decompress;
+ int64_t ofs_x, end_x;
+
+ RGWGetObj_CB cb(this);
+ RGWGetObj_Filter* filter = (RGWGetObj_Filter *)&cb;
+ boost::optional<RGWGetObj_Decompress> decompress;
+ std::unique_ptr<RGWGetObj_Filter> decrypt;
+ map<string, bufferlist>::iterator attr_iter;
+
+ perfcounter->inc(l_rgw_get);
+
+ std::unique_ptr<rgw::sal::Object::ReadOp> read_op(s->object->get_read_op(s->obj_ctx));
+
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ goto done_err;
+
+ op_ret = init_common();
+ if (op_ret < 0)
+ goto done_err;
+
+ read_op->params.mod_ptr = mod_ptr;
+ read_op->params.unmod_ptr = unmod_ptr;
+ read_op->params.high_precision_time = s->system_request; /* system requests need to use high precision time */
+ read_op->params.mod_zone_id = mod_zone_id;
+ read_op->params.mod_pg_ver = mod_pg_ver;
+ read_op->params.if_match = if_match;
+ read_op->params.if_nomatch = if_nomatch;
+ read_op->params.lastmod = &lastmod;
+
+ op_ret = read_op->prepare(s->yield, this);
+ if (op_ret < 0)
+ goto done_err;
+ version_id = s->object->get_instance();
+ s->obj_size = s->object->get_obj_size();
+ attrs = s->object->get_attrs();
+
+ /* STAT ops don't need data, and do no i/o */
+ if (get_type() == RGW_OP_STAT_OBJ) {
+ return;
+ }
+ if (s->info.env->exists("HTTP_X_RGW_AUTH")) {
+ op_ret = 0;
+ goto done_err;
+ }
+ /* start gettorrent */
+ if (torrent.get_flag())
+ {
+ attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
+ if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
+ ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects "
+ "encrypted with SSE-C" << dendl;
+ op_ret = -EINVAL;
+ goto done_err;
+ }
+ torrent.init(s, store);
+ rgw_obj obj = s->object->get_obj();
+ op_ret = torrent.get_torrent_file(s->object.get(), total_len, bl, obj);
+ if (op_ret < 0)
+ {
+ ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
+ << dendl;
+ goto done_err;
+ }
+ op_ret = send_response_data(bl, 0, total_len);
+ if (op_ret < 0)
+ {
+ ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl;
+ goto done_err;
+ }
+ return;
+ }
+ /* end gettorrent */
+
+ op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
+ goto done_err;
+ }
+ if (need_decompress) {
+ s->obj_size = cs_info.orig_size;
+ s->object->set_obj_size(cs_info.orig_size);
+ decompress.emplace(s->cct, &cs_info, partial_content, filter);
filter = &*decompress;
}
- // for range requests with obj size 0
- if (range_str && !(s->obj_size)) {
- total_len = 0;
- op_ret = -ERANGE;
- goto done_err;
+
+ attr_iter = attrs.find(RGW_ATTR_MANIFEST);
+ if (attr_iter != attrs.end() && get_type() == RGW_OP_GET_OBJ && get_data) {
+ RGWObjManifest m;
+ decode(m, attr_iter->second);
+ if (m.get_tier_type() == "cloud-s3") {
+ /* XXX: Instead send presigned redirect or read-through */
+ op_ret = -ERR_INVALID_OBJECT_STATE;
+ ldpp_dout(this, 0) << "ERROR: Cannot get cloud tiered object. Failing with "
+ << op_ret << dendl;
+ goto done_err;
+ }
+ }
+
+ attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
+ if (attr_iter != attrs.end() && !skip_manifest) {
+ op_ret = handle_user_manifest(attr_iter->second.c_str(), y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret="
+ << op_ret << dendl;
+ goto done_err;
+ }
+ return;
+ }
+
+ attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
+ if (attr_iter != attrs.end() && !skip_manifest) {
+ is_slo = true;
+ op_ret = handle_slo_manifest(attr_iter->second, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
+ << dendl;
+ goto done_err;
+ }
+ return;
+ }
+
+ // for range requests with obj size 0
+ if (range_str && !(s->obj_size)) {
+ total_len = 0;
+ op_ret = -ERANGE;
+ goto done_err;
+ }
+
+ op_ret = s->object->range_to_ofs(s->obj_size, ofs, end);
+ if (op_ret < 0)
+ goto done_err;
+ total_len = (ofs <= end ? end + 1 - ofs : 0);
+
+ /* Check whether the object has expired. Swift API documentation
+ * states that we should return 404 Not Found in such a case. */
+ if (need_object_expiration() && s->object->is_expired()) {
+ op_ret = -ENOENT;
+ goto done_err;
+ }
+
+ /* Decode S3 objtags, if any */
+ rgw_cond_decode_objtags(s, attrs);
+
+ start = ofs;
+
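+ // response filter chain: chunks from read_op pass through decrypt (if configured), then decompress (if needed), then the callback that hands them to send_response_data()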
+ attr_iter = attrs.find(RGW_ATTR_MANIFEST);
+ op_ret = this->get_decrypt_filter(&decrypt, filter,
+ attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
+ if (decrypt != nullptr) {
+ filter = decrypt.get();
+ }
+ if (op_ret < 0) {
+ goto done_err;
+ }
+
+ if (!get_data || ofs > end) {
+ send_response_data(bl, 0, 0);
+ return;
+ }
+
+ perfcounter->inc(l_rgw_get_b, end - ofs);
+
+ ofs_x = ofs;
+ end_x = end;
+ filter->fixup_range(ofs_x, end_x);
+ op_ret = read_op->iterate(this, ofs_x, end_x, filter, s->yield);
+
+ if (op_ret >= 0)
+ op_ret = filter->flush();
+
+ perfcounter->tinc(l_rgw_get_lat, s->time_elapsed());
+ if (op_ret < 0) {
+ goto done_err;
+ }
+
+ op_ret = send_response_data(bl, 0, 0);
+ if (op_ret < 0) {
+ goto done_err;
+ }
+ return;
+
+done_err:
+ send_response_data_error(y);
+}
+
+int RGWGetObj::init_common()
+{
+ if (range_str) {
+ /* the range was not parsed (or failed to parse) during prefetch; parse it now */
+ if (!range_parsed) {
+ int r = parse_range();
+ if (r < 0)
+ return r;
+ }
+ }
+ if (if_mod) {
+ if (parse_time(if_mod, &mod_time) < 0)
+ return -EINVAL;
+ mod_ptr = &mod_time;
+ }
+
+ if (if_unmod) {
+ if (parse_time(if_unmod, &unmod_time) < 0)
+ return -EINVAL;
+ unmod_ptr = &unmod_time;
+ }
+
+ return 0;
+}
+
+int RGWListBuckets::verify_permission(optional_yield y)
+{
+ rgw::Partition partition = rgw::Partition::aws;
+ rgw::Service service = rgw::Service::s3;
+
+ string tenant;
+ if (s->auth.identity->get_identity_type() == TYPE_ROLE) {
+ tenant = s->auth.identity->get_role_tenant();
+ } else {
+ tenant = s->user->get_tenant();
+ }
+
+ if (!verify_user_permission(this, s, ARN(partition, service, "", tenant, "*"), rgw::IAM::s3ListAllMyBuckets)) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+int RGWGetUsage::verify_permission(optional_yield y)
+{
+ if (s->auth.identity->is_anonymous()) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void RGWListBuckets::execute(optional_yield y)
+{
+ bool done;
+ bool started = false;
+ uint64_t total_count = 0;
+
+ const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
+
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ goto send_end;
+ }
+
+ if (supports_account_metadata()) {
+ op_ret = s->user->read_attrs(this, s->yield);
+ if (op_ret < 0) {
+ goto send_end;
+ }
+ }
+
+ is_truncated = false;
+ do {
+ rgw::sal::BucketList buckets;
+ uint64_t read_count;
+ if (limit >= 0) {
+ read_count = min(limit - total_count, max_buckets);
+ } else {
+ read_count = max_buckets;
+ }
+
+ op_ret = s->user->list_buckets(this, marker, end_marker, read_count, should_get_stats(), buckets, y);
+
+ if (op_ret < 0) {
+ /* hmm.. something wrong here.. the user was authenticated, so it
+ should exist */
+ ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
+ << s->user->get_id() << dendl;
+ break;
+ }
+
+ /* We need to have stats for all our policies - even if a given policy
+ * isn't actually used in a given account. In such a situation its usage
+ * stats would simply be full of zeros. */
+ for (const auto& policy : store->get_zone()->get_zonegroup().placement_targets) {
+ policies_stats.emplace(policy.second.name,
+ decltype(policies_stats)::mapped_type());
+ }
+
+ std::map<std::string, std::unique_ptr<rgw::sal::Bucket>>& m = buckets.get_buckets();
+ for (const auto& kv : m) {
+ const auto& bucket = kv.second;
+
+ global_stats.bytes_used += bucket->get_size();
+ global_stats.bytes_used_rounded += bucket->get_size_rounded();
+ global_stats.objects_count += bucket->get_count();
+
+ /* operator[] can still create a new entry for a storage policy seen
+ * for the first time. */
+ auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()];
+ policy_stats.bytes_used += bucket->get_size();
+ policy_stats.bytes_used_rounded += bucket->get_size_rounded();
+ policy_stats.buckets_count++;
+ policy_stats.objects_count += bucket->get_count();
+ }
+ global_stats.buckets_count += m.size();
+ total_count += m.size();
+
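+ // stop when the last chunk came back short (no more buckets) or the caller-supplied limit has been reached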
+ done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));
+
+ if (!started) {
+ send_response_begin(buckets.count() > 0);
+ started = true;
+ }
+
+ if (read_count > 0 &&
+ !m.empty()) {
+ auto riter = m.rbegin();
+ marker = riter->first;
+
+ handle_listing_chunk(std::move(buckets));
+ }
+ } while (is_truncated && !done);
+
+send_end:
+ if (!started) {
+ send_response_begin(false);
+ }
+ send_response_end();
+}
+
+void RGWGetUsage::execute(optional_yield y)
+{
+ uint64_t start_epoch = 0;
+ uint64_t end_epoch = (uint64_t)-1;
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
+
+ if (!start_date.empty()) {
+ op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to parse start date" << dendl;
+ return;
+ }
+ }
+
+ if (!end_date.empty()) {
+ op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to parse end date" << dendl;
+ return;
+ }
+ }
+
+ uint32_t max_entries = 1000;
+
+ bool is_truncated = true;
+
+ RGWUsageIter usage_iter;
+
+ while (s->bucket && is_truncated) {
+ op_ret = s->bucket->read_usage(this, start_epoch, end_epoch, max_entries, &is_truncated,
+ usage_iter, usage);
+ if (op_ret == -ENOENT) {
+ op_ret = 0;
+ is_truncated = false;
+ }
+
+ if (op_ret < 0) {
+ return;
+ }
+ }
+
+ op_ret = rgw_user_sync_all_stats(this, store, s->user.get(), y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
+ return;
+ }
+
+ op_ret = rgw_user_get_all_buckets_stats(this, store, s->user.get(), buckets_usage, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl;
+ return;
+ }
+
+ op_ret = s->user->read_stats(this, y, &stats);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
+ return;
+ }
+
+ return;
+}
+
+int RGWStatAccount::verify_permission(optional_yield y)
+{
+ if (!verify_user_permission_no_policy(this, s, RGW_PERM_READ)) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void RGWStatAccount::execute(optional_yield y)
+{
+ string marker;
+ rgw::sal::BucketList buckets;
+ uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
+ const string *lastmarker;
+
+ do {
+
+ lastmarker = nullptr;
+ op_ret = s->user->list_buckets(this, marker, string(), max_buckets, true, buckets, y);
+ if (op_ret < 0) {
+ /* hmm.. something wrong here.. the user was authenticated, so it
+ should exist */
+ ldpp_dout(this, 10) << "WARNING: failed on list_buckets uid="
+ << s->user->get_id() << " ret=" << op_ret << dendl;
+ break;
+ } else {
+ /* We need to have stats for all our policies - even if a given policy
+ * isn't actually used in a given account. In such a situation its usage
+ * stats would simply be full of zeros. */
+ for (const auto& policy : store->get_zone()->get_zonegroup().placement_targets) {
+ policies_stats.emplace(policy.second.name,
+ decltype(policies_stats)::mapped_type());
+ }
+
+ std::map<std::string, std::unique_ptr<rgw::sal::Bucket>>& m = buckets.get_buckets();
+ for (const auto& kv : m) {
+ const auto& bucket = kv.second;
+ lastmarker = &kv.first;
+
+ global_stats.bytes_used += bucket->get_size();
+ global_stats.bytes_used_rounded += bucket->get_size_rounded();
+ global_stats.objects_count += bucket->get_count();
+
+ /* operator[] can still create a new entry for a storage policy seen
+ * for the first time. */
+ auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()];
+ policy_stats.bytes_used += bucket->get_size();
+ policy_stats.bytes_used_rounded += bucket->get_size_rounded();
+ policy_stats.buckets_count++;
+ policy_stats.objects_count += bucket->get_count();
+ }
+ global_stats.buckets_count += m.size();
+
+ }
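+ // if the listing made no progress, bail out instead of looping forever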
+ if (!lastmarker) {
+ ldpp_dout(this, -1) << "ERROR: rgw_read_user_buckets, stasis at marker="
+ << marker << " uid=" << s->user->get_id() << dendl;
+ break;
+ }
+ marker = *lastmarker;
+ } while (buckets.is_truncated());
+}
+
+int RGWGetBucketVersioning::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
+}
+
+void RGWGetBucketVersioning::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWGetBucketVersioning::execute(optional_yield y)
+{
+ if (! s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+
+ versioned = s->bucket->versioned();
+ versioning_enabled = s->bucket->versioning_enabled();
+ mfa_enabled = s->bucket->get_info().mfa_enabled();
+}
+
+int RGWSetBucketVersioning::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
+}
+
+void RGWSetBucketVersioning::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWSetBucketVersioning::execute(optional_yield y)
+{
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
+
+ if (! s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+
+ if (s->bucket->get_info().obj_lock_enabled() && versioning_status != VersioningEnabled) {
+ s->err.message = "bucket versioning cannot be disabled on buckets with object lock enabled";
+ ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
+ op_ret = -ERR_INVALID_BUCKET_STATE;
+ return;
+ }
+
+ bool cur_mfa_status = s->bucket->get_info().mfa_enabled();
+
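+ // only treat the request as an MFA-delete change if it differs from the bucket's current setting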
+ mfa_set_status &= (mfa_status != cur_mfa_status);
+
+ if (mfa_set_status &&
+ !s->mfa_verified) {
+ op_ret = -ERR_MFA_REQUIRED;
+ return;
+ }
+ // if MFA is enabled for the bucket, make sure an MFA code was validated before the versioning status can be changed
+ if (cur_mfa_status) {
+ bool req_versioning_status = false;
+ // check whether the requested versioning status differs from the one currently set for the bucket; if it does and MFA is not verified, require MFA
+ if (versioning_status == VersioningEnabled) {
+ req_versioning_status = (s->bucket->get_info().flags & BUCKET_VERSIONS_SUSPENDED) != 0;
+ } else if (versioning_status == VersioningSuspended) {
+ req_versioning_status = (s->bucket->get_info().flags & BUCKET_VERSIONS_SUSPENDED) == 0;
+ }
+ if (req_versioning_status && !s->mfa_verified) {
+ op_ret = -ERR_MFA_REQUIRED;
+ return;
+ }
+ }
+
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ bool modified = mfa_set_status;
+
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [&] {
+ if (mfa_set_status) {
+ if (mfa_status) {
+ s->bucket->get_info().flags |= BUCKET_MFA_ENABLED;
+ } else {
+ s->bucket->get_info().flags &= ~BUCKET_MFA_ENABLED;
+ }
+ }
+
+ if (versioning_status == VersioningEnabled) {
+ s->bucket->get_info().flags |= BUCKET_VERSIONED;
+ s->bucket->get_info().flags &= ~BUCKET_VERSIONS_SUSPENDED;
+ modified = true;
+ } else if (versioning_status == VersioningSuspended) {
+ s->bucket->get_info().flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
+ modified = true;
+ } else {
+ return op_ret;
+ }
+ s->bucket->set_attrs(rgw::sal::Attrs(s->bucket_attrs));
+ return s->bucket->put_info(this, false, real_time());
+ });
+
+ if (!modified) {
+ return;
+ }
+
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name()
+ << " returned err=" << op_ret << dendl;
+ return;
+ }
+}
+
+int RGWGetBucketWebsite::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
+}
+
+void RGWGetBucketWebsite::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWGetBucketWebsite::execute(optional_yield y)
+{
+ if (!s->bucket->get_info().has_website) {
+ op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
+ }
+}
+
+int RGWSetBucketWebsite::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
+}
+
+void RGWSetBucketWebsite::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWSetBucketWebsite::execute(optional_yield y)
+{
+ op_ret = get_params(y);
+
+ if (op_ret < 0)
+ return;
+
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ s->bucket->get_info().has_website = true;
+ s->bucket->get_info().website_conf = website_conf;
+ op_ret = s->bucket->put_info(this, false, real_time());
+ return op_ret;
+ });
+
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name()
+ << " returned err=" << op_ret << dendl;
+ return;
+ }
+}
+
+int RGWDeleteBucketWebsite::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
+}
+
+void RGWDeleteBucketWebsite::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWDeleteBucketWebsite::execute(optional_yield y)
+{
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+
+ bufferlist in_data;
+
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name()
+ << "returned err=" << op_ret << dendl;
+ return;
+ }
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ s->bucket->get_info().has_website = false;
+ s->bucket->get_info().website_conf = RGWBucketWebsiteConf();
+ op_ret = s->bucket->put_info(this, false, real_time());
+ return op_ret;
+ });
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket
+ << " returned err=" << op_ret << dendl;
+ return;
+ }
+}
+
+int RGWStatBucket::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3ListBucket)) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void RGWStatBucket::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWStatBucket::execute(optional_yield y)
+{
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+
+ op_ret = store->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y);
+ if (op_ret) {
+ return;
+ }
+ op_ret = bucket->update_container_stats(s);
+}
+
+int RGWListBucket::verify_permission(optional_yield y)
+{
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+ if (!prefix.empty())
+ s->env.emplace("s3:prefix", prefix);
+
+ if (!delimiter.empty())
+ s->env.emplace("s3:delimiter", delimiter);
+
+ s->env.emplace("s3:max-keys", std::to_string(max));
+
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this,
+ s,
+ list_versions ?
+ rgw::IAM::s3ListBucketVersions :
+ rgw::IAM::s3ListBucket)) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+int RGWListBucket::parse_max_keys()
+{
+ // Bound max value of max-keys to configured value for security
+ // Bound min value of max-keys to '0'
+ // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
+ // empty without listing any items.
+ return parse_value_and_bound(max_keys, max, 0,
+ g_conf().get_val<uint64_t>("rgw_max_listing_results"),
+ default_max);
+}
+
+void RGWListBucket::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWListBucket::execute(optional_yield y)
+{
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+
+ if (allow_unordered && !delimiter.empty()) {
+ ldpp_dout(this, 0) <<
+ "ERROR: unordered bucket listing requested with a delimiter" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ if (need_container_stats()) {
+ op_ret = s->bucket->update_container_stats(s);
+ }
+
+ rgw::sal::Bucket::ListParams params;
+ params.prefix = prefix;
+ params.delim = delimiter;
+ params.marker = marker;
+ params.end_marker = end_marker;
+ params.list_versions = list_versions;
+ params.allow_unordered = allow_unordered;
+ params.shard_id = shard_id;
+
+ rgw::sal::Bucket::ListResults results;
+
+ op_ret = s->bucket->list(this, params, max, results, y);
+ if (op_ret >= 0) {
+ next_marker = results.next_marker;
+ is_truncated = results.is_truncated;
+ objs = std::move(results.objs);
+ common_prefixes = std::move(results.common_prefixes);
+ }
+}
+
+int RGWGetBucketLogging::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
+}
+
+int RGWGetBucketLocation::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
+}
+
+int RGWCreateBucket::verify_permission(optional_yield y)
+{
+ /* This check is mostly needed for S3, which doesn't support account ACLs.
+ * Swift doesn't allow delegating any permission to an anonymous user,
+ * so this becomes an early exit in that case. */
+ if (s->auth.identity->is_anonymous()) {
+ return -EACCES;
+ }
+
+ rgw_bucket bucket;
+ bucket.name = s->bucket_name;
+ bucket.tenant = s->bucket_tenant;
+ ARN arn = ARN(bucket);
+ if (!verify_user_permission(this, s, arn, rgw::IAM::s3CreateBucket)) {
+ return -EACCES;
+ }
+
+ if (s->user->get_tenant() != s->bucket_tenant) {
+ //AssumeRole is meant for cross account access
+ if (s->auth.identity->get_identity_type() != TYPE_ROLE) {
+ ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
+ << " (user_id.tenant=" << s->user->get_tenant()
+ << " requested=" << s->bucket_tenant << ")"
+ << dendl;
+ return -EACCES;
+ }
+ }
+
+ if (s->user->get_max_buckets() < 0) {
+ return -EPERM;
+ }
+
+ if (s->user->get_max_buckets()) {
+ rgw::sal::BucketList buckets;
+ string marker;
+ op_ret = s->user->list_buckets(this, marker, string(), s->user->get_max_buckets(),
+ false, buckets, y);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ if ((int)buckets.count() >= s->user->get_max_buckets()) {
+ return -ERR_TOO_MANY_BUCKETS;
+ }
+ }
+
+ return 0;
+}
+
+void RGWCreateBucket::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
+ map<string, bufferlist>& out_attrs,
+ map<string, bufferlist>& out_rmattrs)
+{
+ for (const auto& kv : orig_attrs) {
+ const string& name = kv.first;
+
+ /* Check if the attr is user-defined metadata item. */
+ if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
+ RGW_ATTR_META_PREFIX) == 0) {
+ /* For the objects all existing meta attrs have to be removed. */
+ out_rmattrs[name] = kv.second;
+ } else if (out_attrs.find(name) == std::end(out_attrs)) {
+ out_attrs[name] = kv.second;
+ }
+ }
+}
+
+/* Fuse resource metadata based on the original attributes in @orig_attrs, the
+ * set of _custom_ attribute names to remove in @rmattr_names and the attributes
+ * in @out_attrs. Place the results in @out_attrs.
+ *
+ * NOTE: it is assumed that all special attrs already present in @out_attrs
+ * will be preserved without any change. Special attributes are those whose
+ * names do NOT start with RGW_ATTR_META_PREFIX; they complement the custom ones
+ * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta-* and so on. */
+static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
+ const set<string>& rmattr_names,
+ map<string, bufferlist>& out_attrs)
+{
+ for (const auto& kv : orig_attrs) {
+ const string& name = kv.first;
+
+ /* Check if the attr is user-defined metadata item. */
+ if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
+ RGW_ATTR_META_PREFIX) == 0) {
+ /* For the buckets all existing meta attrs are preserved,
+ except those that are listed in rmattr_names. */
+ if (rmattr_names.find(name) != std::end(rmattr_names)) {
+ const auto aiter = out_attrs.find(name);
+
+ if (aiter != std::end(out_attrs)) {
+ out_attrs.erase(aiter);
+ }
+ } else {
+ /* emplace() won't alter the map if the key is already present.
+ * This behaviour is fully intentional here. */
+ out_attrs.emplace(kv);
+ }
+ } else if (out_attrs.find(name) == std::end(out_attrs)) {
+ out_attrs[name] = kv.second;
+ }
+ }
+}
+
+
+static void populate_with_generic_attrs(const req_state * const s,
+ map<string, bufferlist>& out_attrs)
+{
+ for (const auto& kv : s->generic_attrs) {
+ bufferlist& attrbl = out_attrs[kv.first];
+ const string& val = kv.second;
+ attrbl.clear();
+ attrbl.append(val.c_str(), val.size() + 1);
}
+}
- op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
- if (op_ret < 0)
- goto done_err;
- total_len = (ofs <= end ? end + 1 - ofs : 0);
- attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
- if (attr_iter != attrs.end() && !skip_manifest) {
- op_ret = handle_user_manifest(attr_iter->second.c_str());
- if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
- << op_ret << dendl;
- goto done_err;
+static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
+ const std::set<std::string>& rmattr_names,
+ RGWQuotaInfo& quota,
+ bool * quota_extracted = nullptr)
+{
+ bool extracted = false;
+
+ /* Put new limit on max objects. */
+ auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
+ std::string err;
+ if (std::end(add_attrs) != iter) {
+ quota.max_objects =
+ static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
+ if (!err.empty()) {
+ return -EINVAL;
}
- return;
+ add_attrs.erase(iter);
+ extracted = true;
}
- attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
- if (attr_iter != attrs.end() && !skip_manifest) {
- is_slo = true;
- op_ret = handle_slo_manifest(attr_iter->second);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
- << dendl;
- goto done_err;
+ /* Put new limit on bucket (container) size. */
+ iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
+ if (iter != add_attrs.end()) {
+ quota.max_size =
+ static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
+ if (!err.empty()) {
+ return -EINVAL;
}
- return;
+ add_attrs.erase(iter);
+ extracted = true;
}
- /* Check whether the object has expired. Swift API documentation
- * stands that we should return 404 Not Found in such case. */
- if (need_object_expiration() && object_is_expired(attrs)) {
- op_ret = -ENOENT;
- goto done_err;
+ for (const auto& name : rmattr_names) {
+ /* Remove limit on max objects. */
+ if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
+ quota.max_objects = -1;
+ extracted = true;
+ }
+
+ /* Remove limit on max bucket size. */
+ if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
+ quota.max_size = -1;
+ extracted = true;
+ }
}
- start = ofs;
+ /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
+ quota.check_on_raw = true;
+ quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
- /* STAT ops don't need data, and do no i/o */
- if (get_type() == RGW_OP_STAT_OBJ) {
- return;
+ if (quota_extracted) {
+ *quota_extracted = extracted;
}
- attr_iter = attrs.find(RGW_ATTR_MANIFEST);
- op_ret = this->get_decrypt_filter(&decrypt, filter,
- attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
- if (decrypt != nullptr) {
- filter = decrypt.get();
+ return 0;
+}
+
+
+static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
+ const std::set<std::string>& rmattr_names,
+ RGWBucketWebsiteConf& ws_conf)
+{
+ std::string lstval;
+
+ /* Let's define a mapping between each custom attribute and the memory where
+ * the attribute's value should be stored. The memory location is expressed by
+ * a non-const reference. */
+ const auto mapping = {
+ std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
+ std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
+ std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
+ std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
+ std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
+ };
+
+ for (const auto& kv : mapping) {
+ const char * const key = kv.first;
+ auto& target = kv.second;
+
+ auto iter = add_attrs.find(key);
+
+ if (std::end(add_attrs) != iter) {
+ /* The "target" is a reference to ws_conf. */
+ target = iter->second.c_str();
+ add_attrs.erase(iter);
+ }
+
+ if (rmattr_names.count(key)) {
+ target = std::string();
+ }
}
- if (op_ret < 0) {
- goto done_err;
+
+ if (! lstval.empty()) {
+ ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
}
+}
- if (!get_data || ofs > end) {
- send_response_data(bl, 0, 0);
+
+void RGWCreateBucket::execute(optional_yield y)
+{
+ buffer::list aclbl;
+ buffer::list corsbl;
+ string bucket_name = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name);
+ rgw_raw_obj obj(store->get_zone()->get_params().domain_root, bucket_name);
+
+ op_ret = get_params(y);
+ if (op_ret < 0)
return;
+
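+ /* Unless relaxed region enforcement is configured, a non-empty
+ * LocationConstraint must name a zonegroup API known to this realm and, for
+ * non-master zonegroups, match the local zonegroup's api_name. */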
+ if (!relaxed_region_enforcement &&
+ !location_constraint.empty() &&
+ !store->get_zone()->has_zonegroup_api(location_constraint)) {
+ ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
+ << " can't be found." << dendl;
+ op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
+ s->err.message = "The specified location-constraint is not valid";
+ return;
}
- perfcounter->inc(l_rgw_get_b, end - ofs);
+ if (!relaxed_region_enforcement && !store->get_zone()->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
+ store->get_zone()->get_zonegroup().api_name != location_constraint) {
+ ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
+ << " doesn't match zonegroup" << " (" << store->get_zone()->get_zonegroup().api_name << ")"
+ << dendl;
+ op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
+ s->err.message = "The specified location-constraint is not valid";
+ return;
+ }
- ofs_x = ofs;
- end_x = end;
- filter->fixup_range(ofs_x, end_x);
- op_ret = read_op.iterate(ofs_x, end_x, filter);
+ const auto& zonegroup = store->get_zone()->get_zonegroup();
+ if (!placement_rule.name.empty() &&
+ !zonegroup.placement_targets.count(placement_rule.name)) {
+ ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
+ << " doesn't exist in the placement targets of zonegroup"
+ << " (" << store->get_zone()->get_zonegroup().api_name << ")" << dendl;
+ op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
+ s->err.message = "The specified placement target does not exist";
+ return;
+ }
- if (op_ret >= 0)
- op_ret = filter->flush();
+ /* we need to make sure we read the bucket info; it hasn't been read yet for
+ * this specific request */
+ {
+ std::unique_ptr<rgw::sal::Bucket> tmp_bucket;
+ op_ret = store->get_bucket(this, s->user.get(), s->bucket_tenant,
+ s->bucket_name, &tmp_bucket, y);
+ if (op_ret < 0 && op_ret != -ENOENT)
+ return;
+ s->bucket_exists = (op_ret != -ENOENT);
+
+ if (s->bucket_exists) {
+ if (!s->system_request &&
+ store->get_zone()->get_zonegroup().get_id() !=
+ tmp_bucket->get_info().zonegroup) {
+ op_ret = -EEXIST;
+ return;
+ }
+ /* Initialize info from req_state */
+ info = tmp_bucket->get_info();
+ }
+ }
+
+ s->bucket_owner.set_id(s->user->get_id()); /* XXX dang use s->bucket->owner */
+ s->bucket_owner.set_name(s->user->get_display_name());
+
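+ /* A system (forwarded) request may carry the originating zonegroup id;
+ * all other requests create the bucket in the local zonegroup. */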
+ string zonegroup_id;
+
+ if (s->system_request) {
+ zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
+ if (zonegroup_id.empty()) {
+ zonegroup_id = store->get_zone()->get_zonegroup().get_id();
+ }
+ } else {
+ zonegroup_id = store->get_zone()->get_zonegroup().get_id();
+ }
+
+ /* Encode special metadata first as we're using std::map::emplace under
+ * the hood. This method will add the new items only if the map doesn't
+ * contain such keys yet. */
+ policy.encode(aclbl);
+ emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
+
+ if (has_cors) {
+ cors_config.encode(corsbl);
+ emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
+ }
+
+ RGWQuotaInfo quota_info;
+ const RGWQuotaInfo * pquota_info = nullptr;
+ if (need_metadata_upload()) {
+ /* It is assumed that the following functions WILL NOT change any special
+ * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ if (op_ret < 0) {
+ return;
+ }
+ prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
+ populate_with_generic_attrs(s, attrs);
+
+ op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
+ if (op_ret < 0) {
+ return;
+ } else {
+ pquota_info = &quota_info;
+ }
+
+ /* Web site of Swift API. */
+ filter_out_website(attrs, rmattr_names, info.website_conf);
+ info.has_website = !info.website_conf.is_empty();
+ }
+
+ rgw_bucket tmp_bucket;
+ tmp_bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
+ tmp_bucket.name = s->bucket_name;
+
+ /* Handle updates of the metadata for Swift's object versioning. */
+ if (swift_ver_location) {
+ info.swift_ver_location = *swift_ver_location;
+ info.swift_versioning = (! swift_ver_location->empty());
+ }
+
+ /* We're replacing bucket with the newly created one */
+ ldpp_dout(this, 10) << "user=" << s->user << " bucket=" << tmp_bucket << dendl;
+ op_ret = s->user->create_bucket(this, tmp_bucket, zonegroup_id,
+ placement_rule,
+ info.swift_ver_location,
+ pquota_info, policy, attrs, info, ep_objv,
+ true, obj_lock_enabled, &s->bucket_exists, s->info,
+ &s->bucket, y);
+
+ /* continue if EEXIST and create_bucket will fail below. this way we can
+ * recover from a partial create by retrying it. */
+ ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket.get() << dendl;
+
+ if (op_ret)
+ return;
+
+ const bool existed = s->bucket_exists;
+ if (need_metadata_upload() && existed) {
+ /* OK, it looks like we lost a race with another request. As we have to
+ * handle metadata fusion and upload, the whole operation becomes very
+ * similar in nature to PutMetadataBucket. However, as the attrs may have
+ * changed in the meantime, we have to refresh. */
+ short tries = 0;
+ do {
+ map<string, bufferlist> battrs;
+
+ op_ret = s->bucket->load_bucket(this, y);
+ if (op_ret < 0) {
+ return;
+ } else if (!s->bucket->is_owner(s->user.get())) {
+ /* New bucket doesn't belong to the account we're operating on. */
+ op_ret = -EEXIST;
+ return;
+ } else {
+ s->bucket_attrs = s->bucket->get_attrs();
+ }
+
+ attrs.clear();
+
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ if (op_ret < 0) {
+ return;
+ }
+ prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
+ populate_with_generic_attrs(s, attrs);
+ op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket->get_info().quota);
+ if (op_ret < 0) {
+ return;
+ }
+
+ /* Handle updates of the metadata for Swift's object versioning. */
+ if (swift_ver_location) {
+ s->bucket->get_info().swift_ver_location = *swift_ver_location;
+ s->bucket->get_info().swift_versioning = (! swift_ver_location->empty());
+ }
+
+ /* Web site of Swift API. */
+ filter_out_website(attrs, rmattr_names, s->bucket->get_info().website_conf);
+ s->bucket->get_info().has_website = !s->bucket->get_info().website_conf.is_empty();
- perfcounter->tinc(l_rgw_get_lat,
- (ceph_clock_now() - start_time));
- if (op_ret < 0) {
- goto done_err;
- }
+ /* This will also set the quota on the bucket. */
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ } while (op_ret == -ECANCELED && tries++ < 20);
- op_ret = send_response_data(bl, 0, 0);
- if (op_ret < 0) {
- goto done_err;
+ /* Restore the proper return code. */
+ if (op_ret >= 0) {
+ op_ret = -ERR_BUCKET_EXISTS;
+ }
}
- return;
-
-done_err:
- send_response_data_error();
}
-int RGWGetObj::init_common()
+int RGWDeleteBucket::verify_permission(optional_yield y)
{
- if (range_str) {
- /* range parsed error when prefetch*/
- if (!range_parsed) {
- int r = parse_range(range_str, ofs, end, &partial_content);
- if (r < 0)
- return r;
- }
- }
- if (if_mod) {
- if (parse_time(if_mod, &mod_time) < 0)
- return -EINVAL;
- mod_ptr = &mod_time;
- }
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- if (if_unmod) {
- if (parse_time(if_unmod, &unmod_time) < 0)
- return -EINVAL;
- unmod_ptr = &unmod_time;
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucket)) {
+ return -EACCES;
}
return 0;
}
-int RGWListBuckets::verify_permission()
+void RGWDeleteBucket::pre_exec()
{
- if (!verify_user_permission(s, RGW_PERM_READ)) {
- return -EACCES;
- }
-
- return 0;
+ rgw_bucket_object_pre_exec(s);
}
-int RGWGetUsage::verify_permission()
+void RGWDeleteBucket::execute(optional_yield y)
{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
+ if (s->bucket_name.empty()) {
+ op_ret = -EINVAL;
+ return;
}
- return 0;
-}
+ if (!s->bucket_exists) {
+ ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+ RGWObjVersionTracker ot;
+ ot.read_version = s->bucket->get_version();
-void RGWListBuckets::execute()
-{
- bool done;
- bool started = false;
- uint64_t total_count = 0;
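+ /* System requests (e.g. forwarded from another zone) may pin the exact
+ * bucket instance version to delete via the tag/ver sys params. */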
+ if (s->system_request) {
+ string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
+ string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
+ if (!tag.empty()) {
+ ot.read_version.tag = tag;
+ uint64_t ver;
+ string err;
+ ver = strict_strtol(ver_str.c_str(), 10, &err);
+ if (!err.empty()) {
+ ldpp_dout(this, 0) << "failed to parse ver param" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ ot.read_version.ver = ver;
+ }
+ }
- uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
+ op_ret = s->bucket->sync_user_stats(this, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret=" << op_ret << dendl;
+ }
- op_ret = get_params();
+ op_ret = s->bucket->check_empty(this, y);
if (op_ret < 0) {
- goto send_end;
+ return;
}
- if (supports_account_metadata()) {
- op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
- if (op_ret < 0) {
- goto send_end;
+ bufferlist in_data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ if (op_ret == -ENOENT) {
+ /* adjust error, we want to return with NoSuchBucket and not
+ * NoSuchKey */
+ op_ret = -ERR_NO_SUCH_BUCKET;
}
+ return;
}
- is_truncated = false;
- do {
- RGWUserBuckets buckets;
- uint64_t read_count;
- if (limit >= 0) {
- read_count = min(limit - total_count, (uint64_t)max_buckets);
- } else {
- read_count = max_buckets;
- }
+ op_ret = rgw_remove_sse_s3_bucket_key(s);
+ if (op_ret != 0) {
+ // do nothing; it will already have been logged
+ }
- op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
- marker, end_marker, read_count,
- should_get_stats(), &is_truncated,
- get_default_max());
- if (op_ret < 0) {
- /* hmm.. something wrong here.. the user was authenticated, so it
- should exist */
- ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
- << s->user->user_id << dendl;
- break;
- }
- map<string, RGWBucketEnt>& m = buckets.get_buckets();
- map<string, RGWBucketEnt>::iterator iter;
- for (iter = m.begin(); iter != m.end(); ++iter) {
- RGWBucketEnt& bucket = iter->second;
- buckets_size += bucket.size;
- buckets_size_rounded += bucket.size_rounded;
- buckets_objcount += bucket.count;
- }
- buckets_count += m.size();
- total_count += m.size();
+ op_ret = s->bucket->remove_bucket(this, false, false, nullptr, y);
+ if (op_ret < 0 && op_ret == -ECANCELED) {
+ // lost a race, either with mdlog sync or another delete bucket operation.
+ // in either case, we've already called ctl.bucket->unlink_bucket()
+ op_ret = 0;
+ }
- done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));
+ return;
+}
- if (!started) {
- send_response_begin(buckets.count() > 0);
- started = true;
+int RGWPutObj::init_processing(optional_yield y) {
+ copy_source = url_decode(s->info.env->get("HTTP_X_AMZ_COPY_SOURCE", ""));
+ copy_source_range = s->info.env->get("HTTP_X_AMZ_COPY_SOURCE_RANGE");
+ size_t pos;
+ int ret;
+
+ /* handle x-amz-copy-source */
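+ /* the copy source has the form [tenant:]<bucket>/<object>[?versionId=<id>],
+ * optionally with a leading '/', and is URL-encoded */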
+ std::string_view cs_view(copy_source);
+ if (! cs_view.empty()) {
+ if (cs_view[0] == '/')
+ cs_view.remove_prefix(1);
+ copy_source_bucket_name = std::string(cs_view);
+ pos = copy_source_bucket_name.find("/");
+ if (pos == std::string::npos) {
+ ret = -EINVAL;
+ ldpp_dout(this, 5) << "x-amz-copy-source bad format" << dendl;
+ return ret;
+ }
+ copy_source_object_name =
+ copy_source_bucket_name.substr(pos + 1, copy_source_bucket_name.size());
+ copy_source_bucket_name = copy_source_bucket_name.substr(0, pos);
+#define VERSION_ID_STR "?versionId="
+ pos = copy_source_object_name.find(VERSION_ID_STR);
+ if (pos == std::string::npos) {
+ copy_source_object_name = url_decode(copy_source_object_name);
+ } else {
+ copy_source_version_id =
+ copy_source_object_name.substr(pos + sizeof(VERSION_ID_STR) - 1);
+ copy_source_object_name =
+ url_decode(copy_source_object_name.substr(0, pos));
+ }
+ pos = copy_source_bucket_name.find(":");
+ if (pos == std::string::npos) {
+ // if tenant is not specified in x-amz-copy-source, use tenant of the requester
+ copy_source_tenant_name = s->user->get_tenant();
+ } else {
+ copy_source_tenant_name = copy_source_bucket_name.substr(0, pos);
+ copy_source_bucket_name = copy_source_bucket_name.substr(pos + 1, copy_source_bucket_name.size());
+ if (copy_source_bucket_name.empty()) {
+ ret = -EINVAL;
+ ldpp_dout(this, 5) << "source bucket name is empty" << dendl;
+ return ret;
+ }
+ }
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ ret = store->get_bucket(this, s->user.get(), copy_source_tenant_name, copy_source_bucket_name,
+ &bucket, y);
+ if (ret < 0) {
+ ldpp_dout(this, 5) << __func__ << "(): get_bucket() returned ret=" << ret << dendl;
+ if (ret == -ENOENT) {
+ ret = -ERR_NO_SUCH_BUCKET;
+ }
+ return ret;
}
- if (!m.empty()) {
- send_response_data(buckets);
+ ret = bucket->load_bucket(this, y);
+ if (ret < 0) {
+ ldpp_dout(this, 5) << __func__ << "(): load_bucket() returned ret=" << ret << dendl;
+ return ret;
+ }
+ copy_source_bucket_info = bucket->get_info();
- map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
- marker = riter->first;
+ /* handle x-amz-copy-source-range */
+ if (copy_source_range) {
+ string range = copy_source_range;
+ pos = range.find("bytes=");
+ if (pos == std::string::npos || pos != 0) {
+ ret = -EINVAL;
+ ldpp_dout(this, 5) << "x-amz-copy-source-range bad format" << dendl;
+ return ret;
+ }
+ /* 6 is the length of "bytes=" */
+ range = range.substr(pos + 6);
+ pos = range.find("-");
+ if (pos == std::string::npos) {
+ ret = -EINVAL;
+ ldpp_dout(this, 5) << "x-amz-copy-source-range bad format" << dendl;
+ return ret;
+ }
+ string first = range.substr(0, pos);
+ string last = range.substr(pos + 1);
+ if (first.find_first_not_of("0123456789") != std::string::npos ||
+ last.find_first_not_of("0123456789") != std::string::npos) {
+ ldpp_dout(this, 5) << "x-amz-copy-source-range bad format not an integer" << dendl;
+ ret = -EINVAL;
+ return ret;
+ }
+ copy_source_range_fst = strtoull(first.c_str(), NULL, 10);
+ copy_source_range_lst = strtoull(last.c_str(), NULL, 10);
+ if (copy_source_range_fst > copy_source_range_lst) {
+ ret = -ERANGE;
+ ldpp_dout(this, 5) << "x-amz-copy-source-range bad format first number bigger than second" << dendl;
+ return ret;
+ }
}
- } while (is_truncated && !done);
-send_end:
- if (!started) {
- send_response_begin(false);
- }
- send_response_end();
+ } /* copy_source */
+ return RGWOp::init_processing(y);
}
-void RGWGetUsage::execute()
+int RGWPutObj::verify_permission(optional_yield y)
{
- uint64_t start_epoch = 0;
- uint64_t end_epoch = (uint64_t)-1;
- op_ret = get_params();
- if (op_ret < 0)
- return;
-
- if (!start_date.empty()) {
- op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
- if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
- return;
- }
- }
-
- if (!end_date.empty()) {
- op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
- if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
- return;
- }
- }
-
- uint32_t max_entries = 1000;
+ if (! copy_source.empty()) {
- bool is_truncated = true;
+ RGWAccessControlPolicy cs_acl(s->cct);
+ boost::optional<Policy> policy;
+ map<string, bufferlist> cs_attrs;
+ std::unique_ptr<rgw::sal::Bucket> cs_bucket;
+ int ret = store->get_bucket(NULL, copy_source_bucket_info, &cs_bucket);
+ if (ret < 0)
+ return ret;
- RGWUsageIter usage_iter;
-
- while (is_truncated) {
- op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries,
- &is_truncated, usage_iter, usage);
+ std::unique_ptr<rgw::sal::Object> cs_object =
+ cs_bucket->get_object(rgw_obj_key(copy_source_object_name, copy_source_version_id));
- if (op_ret == -ENOENT) {
- op_ret = 0;
- is_truncated = false;
+ cs_object->set_atomic(s->obj_ctx);
+ cs_object->set_prefetch_data(s->obj_ctx);
+
+ /* check source object permissions */
+ if (ret = read_obj_policy(this, store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
+ policy, cs_bucket.get(), cs_object.get(), y, true); ret < 0) {
+ return ret;
}
- if (op_ret < 0) {
- return;
- }
+ /* admin request overrides permission checks */
+ if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
+ if (policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
+ //add source object tags for permission evaluation
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, policy, s->iam_user_policies, s->session_policies);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag);
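+ // evaluate read access to the copy source: identity (user) policies first
+ // (an explicit Deny rejects, an Allow ends the loop), then the source bucket
+ // policy, falling back to the source object ACL when both pass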
+ auto usr_policy_res = Effect::Pass;
+ rgw::ARN obj_arn(cs_object->get_obj());
+ for (auto& user_policy : s->iam_user_policies) {
+ if (usr_policy_res = user_policy.eval(s->env, *s->auth.identity,
+ cs_object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion,
+ obj_arn); usr_policy_res == Effect::Deny)
+ return -EACCES;
+ else if (usr_policy_res == Effect::Allow)
+ break;
+ }
+ rgw::IAM::Effect e = Effect::Pass;
+ if (policy) {
+ rgw::ARN obj_arn(cs_object->get_obj());
+ e = policy->eval(s->env, *s->auth.identity,
+ cs_object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion,
+ obj_arn);
+ }
+ if (e == Effect::Deny) {
+ return -EACCES;
+ } else if (usr_policy_res == Effect::Pass && e == Effect::Pass &&
+ !cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
+ RGW_PERM_READ)) {
+ return -EACCES;
+ }
+ rgw_iam_remove_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag);
+ } else if (!cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
+ RGW_PERM_READ)) {
+ return -EACCES;
+ }
+ }
}
- op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
- if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl;
- return ;
+ if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls()) {
+ /* compare() returns 0 on a match, so reject only when the canned ACL
+ * actually names one of the public/authenticated grants */
+ if (s->canned_acl.compare("public-read") == 0 ||
+ s->canned_acl.compare("public-read-write") == 0 ||
+ s->canned_acl.compare("authenticated-read") == 0)
+ return -EACCES;
}
-
- string user_str = s->user->user_id.to_str();
- op_ret = store->cls_user_get_header(user_str, &header);
+
+ auto op_ret = get_params(y);
if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl;
- return ;
+ ldpp_dout(this, 20) << "get_params() returned ret=" << op_ret << dendl;
+ return op_ret;
}
-
- return;
-}
-int RGWStatAccount::verify_permission()
-{
- if (!verify_user_permission(s, RGW_PERM_READ)) {
- return -EACCES;
- }
+ if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
+ rgw_add_grant_to_iam_environment(s->env, s);
- return 0;
-}
+ rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
-void RGWStatAccount::execute()
-{
- string marker;
- bool is_truncated = false;
- uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
+ if (obj_tags != nullptr && obj_tags->count() > 0){
+ auto tags = obj_tags->get_tags();
+ for (const auto& kv: tags){
+ rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second);
+ }
+ }
- do {
- RGWUserBuckets buckets;
+ constexpr auto encrypt_attr = "x-amz-server-side-encryption";
+ constexpr auto s3_encrypt_attr = "s3:x-amz-server-side-encryption";
+ auto enc_header = s->info.crypt_attribute_map.find(encrypt_attr);
+ if (enc_header != s->info.crypt_attribute_map.end()){
+ rgw_add_to_iam_environment(s->env, s3_encrypt_attr, enc_header->second);
+ }
- op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
- string(), max_buckets, true, &is_truncated);
- if (op_ret < 0) {
- /* hmm.. something wrong here.. the user was authenticated, so it
- should exist */
- ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
- << s->user->user_id << dendl;
- break;
- } else {
- map<string, RGWBucketEnt>& m = buckets.get_buckets();
- map<string, RGWBucketEnt>::iterator iter;
- for (iter = m.begin(); iter != m.end(); ++iter) {
- RGWBucketEnt& bucket = iter->second;
- buckets_size += bucket.size;
- buckets_size_rounded += bucket.size_rounded;
- buckets_objcount += bucket.count;
-
- marker = iter->first;
- }
- buckets_count += m.size();
+ constexpr auto kms_attr = "x-amz-server-side-encryption-aws-kms-key-id";
+ constexpr auto s3_kms_attr = "s3:x-amz-server-side-encryption-aws-kms-key-id";
+ auto kms_header = s->info.crypt_attribute_map.find(kms_attr);
+ if (kms_header != s->info.crypt_attribute_map.end()){
+ rgw_add_to_iam_environment(s->env, s3_kms_attr, kms_header->second);
+ }
+
+ // Add bucket tags for authorization
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
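+ // evaluate s3:PutObject against identity policies, the bucket policy and any
+ // session policies; an explicit Deny from any of them rejects the request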
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (identity_policy_res == Effect::Deny)
+ return -EACCES;
+ rgw::IAM::Effect e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ if (s->iam_policy) {
+ ARN obj_arn(s->object->get_obj());
+ e = s->iam_policy->eval(s->env, *s->auth.identity,
+ rgw::IAM::s3PutObject,
+ obj_arn,
+ princ_type);
+ }
+ if (e == Effect::Deny) {
+ return -EACCES;
}
- } while (is_truncated);
-}
-int RGWGetBucketVersioning::verify_permission()
-{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (session_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && e == Effect::Allow))
+ return 0;
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow)
+ return 0;
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow)
+ return 0;
+ }
+ return -EACCES;
+ }
+ if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
+ return 0;
+ }
+ }
+
+ if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
return -EACCES;
}
return 0;
}
-void RGWGetBucketVersioning::pre_exec()
+
+void RGWPutObj::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWGetBucketVersioning::execute()
+class RGWPutObj_CB : public RGWGetObj_Filter
+{
+ RGWPutObj *op;
+public:
+ explicit RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
+ ~RGWPutObj_CB() override {}
+
+ int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
+ return op->get_data_cb(bl, bl_ofs, bl_len);
+ }
+};
+
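+// read callback used for server-side copy: each chunk handed back by the read
+// op is buffered into bl_aux and later claimed by get_data()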
+int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
{
- versioned = s->bucket_info.versioned();
- versioning_enabled = s->bucket_info.versioning_enabled();
+ bufferlist bl_tmp;
+ bl.begin(bl_ofs).copy(bl_len, bl_tmp);
+
+ bl_aux.append(bl_tmp);
+
+ return bl_len;
}
-int RGWSetBucketVersioning::verify_permission()
+int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
- return -EACCES;
- }
+ RGWPutObj_CB cb(this);
+ RGWGetObj_Filter* filter = &cb;
+ boost::optional<RGWGetObj_Decompress> decompress;
+ std::unique_ptr<RGWGetObj_Filter> decrypt;
+ RGWCompressionInfo cs_info;
+ map<string, bufferlist> attrs;
+ int ret = 0;
+
+ uint64_t obj_size;
+ int64_t new_ofs, new_end;
+
+ new_ofs = fst;
+ new_end = lst;
+
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ ret = store->get_bucket(nullptr, copy_source_bucket_info, &bucket);
+ if (ret < 0)
+ return ret;
- return 0;
-}
+ std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(rgw_obj_key(copy_source_object_name, copy_source_version_id));
+ std::unique_ptr<rgw::sal::Object::ReadOp> read_op(obj->get_read_op(s->obj_ctx));
-void RGWSetBucketVersioning::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
-}
+ ret = read_op->prepare(s->yield, this);
+ if (ret < 0)
+ return ret;
-void RGWSetBucketVersioning::execute()
-{
- op_ret = get_params();
- if (op_ret < 0)
- return;
+ obj_size = obj->get_obj_size();
- if (!store->is_meta_master()) {
- op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
- if (op_ret < 0) {
- ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
- return;
- }
+ bool need_decompress;
+ op_ret = rgw_compression_info_from_attrset(obj->get_attrs(), need_decompress, cs_info);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl;
+ return -EIO;
}
- if (enable_versioning) {
- s->bucket_info.flags |= BUCKET_VERSIONED;
- s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
- } else {
- s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
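+ // if the copy source is stored compressed, insert a decompress filter so the
+ // requested range applies to the original (uncompressed) object size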
+ bool partial_content = true;
+ if (need_decompress)
+ {
+ obj_size = cs_info.orig_size;
+ decompress.emplace(s->cct, &cs_info, partial_content, filter);
+ filter = &*decompress;
}
- op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
- &s->bucket_attrs);
+ auto attr_iter = obj->get_attrs().find(RGW_ATTR_MANIFEST);
+ op_ret = this->get_decrypt_filter(&decrypt,
+ filter,
+ obj->get_attrs(),
+ attr_iter != obj->get_attrs().end() ? &(attr_iter->second) : nullptr);
+ if (decrypt != nullptr) {
+ filter = decrypt.get();
+ }
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
- << " returned err=" << op_ret << dendl;
- return;
+ return op_ret;
}
-}
-int RGWGetBucketWebsite::verify_permission()
-{
- if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
- return -EACCES;
+ ret = obj->range_to_ofs(obj_size, new_ofs, new_end);
+ if (ret < 0)
+ return ret;
- return 0;
-}
+ filter->fixup_range(new_ofs, new_end);
+ ret = read_op->iterate(this, new_ofs, new_end, filter, s->yield);
-void RGWGetBucketWebsite::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
+ if (ret >= 0)
+ ret = filter->flush();
+
+ bl.claim_append(bl_aux);
+
+ return ret;
}
-void RGWGetBucketWebsite::execute()
+// special handling for compression type = "random" with multipart uploads
+static CompressorRef get_compressor_plugin(const req_state *s,
+ const std::string& compression_type)
{
- if (!s->bucket_info.has_website) {
- op_ret = -ENOENT;
+ if (compression_type != "random") {
+ return Compressor::create(s->cct, compression_type);
}
-}
-int RGWSetBucketWebsite::verify_permission()
-{
- if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
- return -EACCES;
+ bool is_multipart{false};
+ const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
- return 0;
-}
+ if (!is_multipart) {
+ return Compressor::create(s->cct, compression_type);
+ }
-void RGWSetBucketWebsite::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
+ // use a hash of the multipart upload id so all parts use the same plugin
+ const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
+ if (alg == Compressor::COMP_ALG_NONE) {
+ return nullptr;
+ }
+ return Compressor::create(s->cct, alg);
}
-void RGWSetBucketWebsite::execute()
+void RGWPutObj::execute(optional_yield y)
{
- op_ret = get_params();
+ char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
+ char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
+ char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
+ unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
+ MD5 hash;
+ // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+ hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+ bufferlist bl, aclbl, bs;
+ int len;
+
+ off_t fst;
+ off_t lst;
- if (op_ret < 0)
+ bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
+ perfcounter->inc(l_rgw_put);
+ // report latency on return
+ auto put_lat = make_scope_guard([&] {
+ perfcounter->tinc(l_rgw_put_lat, s->time_elapsed());
+ });
+
+ op_ret = -EINVAL;
+ if (rgw::sal::Object::empty(s->object.get())) {
return;
+ }
- s->bucket_info.has_website = true;
- s->bucket_info.website_conf = website_conf;
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
- op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
+ op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "get_system_versioning_params() returned ret="
+ << op_ret << dendl;
return;
}
-}
-int RGWDeleteBucketWebsite::verify_permission()
-{
- if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
- return -EACCES;
+ if (supplied_md5_b64) {
+ need_calc_md5 = true;
- return 0;
-}
+ ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
+ op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
+ supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
+ ldpp_dout(this, 15) << "ceph_unarmor ret=" << op_ret << dendl;
+ if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
+ op_ret = -ERR_INVALID_DIGEST;
+ return;
+ }
-void RGWDeleteBucketWebsite::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
-}
+ buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
+ ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
+ }
-void RGWDeleteBucketWebsite::execute()
-{
- s->bucket_info.has_website = false;
- s->bucket_info.website_conf = RGWBucketWebsiteConf();
+ if (!chunked_upload) { /* with a chunked upload we don't know how big the upload is;
+ we also check sizes at the end anyway */
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->content_length, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
+ return;
+ }
+ }
- op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
- return;
+ if (supplied_etag) {
+ strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
+ supplied_md5[sizeof(supplied_md5) - 1] = '\0';
}
-}
-int RGWStatBucket::verify_permission()
-{
- if (!verify_bucket_permission(s, RGW_PERM_READ)) {
- return -EACCES;
+ const bool multipart = !multipart_upload_id.empty();
+ auto& obj_ctx = *static_cast<RGWObjectCtx*>(s->obj_ctx);
+
+ /* Handle object versioning of Swift API. */
+ if (! multipart) {
+ op_ret = s->object->swift_versioning_copy(s->obj_ctx, this, s->yield);
+ if (op_ret < 0) {
+ return;
+ }
}
- return 0;
-}
+ // make reservation for notification if needed
+ std::unique_ptr<rgw::sal::Notification> res
+ = store->get_notification(
+ s->object.get(), s->src_object.get(), s,
+ rgw::notify::ObjectCreatedPut);
+ if(!multipart) {
+ op_ret = res->publish_reserve(this, obj_tags.get());
+ if (op_ret < 0) {
+ return;
+ }
+ }
-void RGWStatBucket::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
-}
+ // create the object processor
+ auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
+ s->yield);
+ std::unique_ptr<rgw::sal::Writer> processor;
-void RGWStatBucket::execute()
-{
- if (!s->bucket_exists) {
- op_ret = -ERR_NO_SUCH_BUCKET;
+ rgw_placement_rule *pdest_placement = &s->dest_placement;
+
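+ // choose the writer for this request: a multipart part writer, an append
+ // writer, or the default atomic writer for a regular object upload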
+ if (multipart) {
+ std::unique_ptr<rgw::sal::MultipartUpload> upload;
+ upload = s->bucket->get_multipart_upload(s->object->get_name(),
+ multipart_upload_id);
+ op_ret = upload->get_info(this, s->yield, s->obj_ctx, &pdest_placement);
+
+ s->trace->SetAttribute(tracing::rgw::UPLOAD_ID, multipart_upload_id);
+ multipart_trace = tracing::rgw::tracer.add_span(name(), upload->get_trace());
+
+ if (op_ret < 0) {
+ if (op_ret != -ENOENT) {
+ ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret << ": " << cpp_strerror(-op_ret) << dendl;
+ } else {// -ENOENT: raced with upload complete/cancel, no need to spam log
+ ldpp_dout(this, 20) << "failed to get multipart info (returned " << op_ret << ": " << cpp_strerror(-op_ret) << "): probably raced with upload complete / cancel" << dendl;
+ }
+ return;
+ }
+ /* upload will go out of scope, so copy the dest placement for later use */
+ s->dest_placement = *pdest_placement;
+ pdest_placement = &s->dest_placement;
+ ldpp_dout(this, 20) << "dest_placement for part=" << *pdest_placement << dendl;
+ processor = upload->get_writer(this, s->yield, s->object->clone(),
+ s->user->get_id(), obj_ctx, pdest_placement,
+ multipart_part_num, multipart_part_str);
+ } else if(append) {
+ if (s->bucket->versioned()) {
+ op_ret = -ERR_INVALID_BUCKET_STATE;
+ return;
+ }
+ processor = store->get_append_writer(this, s->yield, s->object->clone(),
+ s->bucket_owner.get_id(), obj_ctx,
+ pdest_placement, s->req_id, position,
+ &cur_accounted_size);
+ } else {
+ if (s->bucket->versioning_enabled()) {
+ if (!version_id.empty()) {
+ s->object->set_instance(version_id);
+ } else {
+ s->object->gen_rand_obj_instance_name();
+ version_id = s->object->get_instance();
+ }
+ }
+ processor = store->get_atomic_writer(this, s->yield, s->object->clone(),
+ s->bucket_owner.get_id(), obj_ctx,
+ pdest_placement, olh_epoch, s->req_id);
+ }
+
+ op_ret = processor->prepare(s->yield);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret
+ << dendl;
return;
}
+ if ((! copy_source.empty()) && !copy_source_range) {
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ op_ret = store->get_bucket(nullptr, copy_source_bucket_info, &bucket);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to get bucket with error " << op_ret << dendl;
+ return;
+ }
+ std::unique_ptr<rgw::sal::Object> obj =
+ bucket->get_object(rgw_obj_key(copy_source_object_name, copy_source_version_id));
- RGWUserBuckets buckets;
- bucket.bucket = s->bucket;
- buckets.add(bucket);
- map<string, RGWBucketEnt>& m = buckets.get_buckets();
- op_ret = store->update_containers_stats(m);
- if (! op_ret)
- op_ret = -EEXIST;
- if (op_ret > 0) {
- op_ret = 0;
- map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
- if (iter != m.end()) {
- bucket = iter->second;
- } else {
- op_ret = -EINVAL;
+ RGWObjState *astate;
+ op_ret = obj->get_obj_state(this, &obj_ctx, &astate, s->yield);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error " << op_ret << dendl;
+ return;
+ }
+ bufferlist bl;
+ if (astate->get_attr(RGW_ATTR_MANIFEST, bl)) {
+ RGWObjManifest m;
+ decode(m, bl);
+ if (m.get_tier_type() == "cloud-s3") {
+ op_ret = -ERR_INVALID_OBJECT_STATE;
+ ldpp_dout(this, 0) << "ERROR: Cannot copy cloud tiered object. Failing with "
+ << op_ret << dendl;
+ return;
+ }
}
- }
-}
-int RGWListBucket::verify_permission()
-{
- if (!verify_bucket_permission(s, RGW_PERM_READ)) {
- return -EACCES;
+ if (!astate->exists){
+ op_ret = -ENOENT;
+ return;
+ }
+ lst = astate->accounted_size - 1;
+ } else {
+ lst = copy_source_range_lst;
}
+ fst = copy_source_range_fst;
- return 0;
-}
+ // no filters by default
+ rgw::sal::DataProcessor *filter = processor.get();
-int RGWListBucket::parse_max_keys()
-{
- if (!max_keys.empty()) {
- char *endptr;
- max = strtol(max_keys.c_str(), &endptr, 10);
- if (endptr) {
- while (*endptr && isspace(*endptr)) // ignore white space
- endptr++;
- if (*endptr) {
- return -EINVAL;
+ const auto& compression_type = store->get_zone()->get_params().get_compression_type(*pdest_placement);
+ CompressorRef plugin;
+ boost::optional<RGWPutObj_Compress> compressor;
+
+ std::unique_ptr<rgw::sal::DataProcessor> encrypt;
+
+ if (!append) { // compression and encryption only apply to full object uploads
+ op_ret = get_encrypt_filter(&encrypt, filter);
+ if (op_ret < 0) {
+ return;
+ }
+ if (encrypt != nullptr) {
+ filter = &*encrypt;
+ } else if (compression_type != "none") {
+ plugin = get_compressor_plugin(s, compression_type);
+ if (!plugin) {
+ ldpp_dout(this, 1) << "Cannot load plugin for compression type "
+ << compression_type << dendl;
+ } else {
+ compressor.emplace(s->cct, plugin, filter);
+ filter = &*compressor;
+ // always send incompressible hint when rgw is itself doing compression
+ s->object->set_compressed(s->obj_ctx);
}
}
- } else {
- max = default_max;
}
+ tracepoint(rgw_op, before_data_transfer, s->req_id.c_str());
+ do {
+ bufferlist data;
+ if (fst > lst)
+ break;
+ if (copy_source.empty()) {
+ len = get_data(data);
+ } else {
+ off_t cur_lst = min<off_t>(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
+ op_ret = get_data(fst, cur_lst, data);
+ if (op_ret < 0)
+ return;
+ len = data.length();
+ s->content_length += len;
+ fst += len;
+ }
+ if (len < 0) {
+ op_ret = len;
+ ldpp_dout(this, 20) << "get_data() returned ret=" << op_ret << dendl;
+ return;
+ } else if (len == 0) {
+ break;
+ }
- return 0;
-}
+ if (need_calc_md5) {
+ hash.Update((const unsigned char *)data.c_str(), data.length());
+ }
-void RGWListBucket::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
-}
+ /* update torrent */
+ torrent.update(data);
-void RGWListBucket::execute()
-{
- if (!s->bucket_exists) {
- op_ret = -ERR_NO_SUCH_BUCKET;
+ op_ret = filter->process(std::move(data), ofs);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "processor->process() returned ret="
+ << op_ret << dendl;
+ return;
+ }
+
+ ofs += len;
+ } while (len > 0);
+ tracepoint(rgw_op, after_data_transfer, s->req_id.c_str(), ofs);
+
+ // flush any data in filters
+ op_ret = filter->process({}, ofs);
+ if (op_ret < 0) {
return;
}
- op_ret = get_params();
- if (op_ret < 0)
+ if (!chunked_upload && ofs != s->content_length) {
+ op_ret = -ERR_REQUEST_TIMEOUT;
return;
+ }
+ s->obj_size = ofs;
+ s->object->set_obj_size(ofs);
- if (need_container_stats()) {
- map<string, RGWBucketEnt> m;
- m[s->bucket.name] = RGWBucketEnt();
- m.begin()->second.bucket = s->bucket;
- op_ret = store->update_containers_stats(m);
- if (op_ret > 0) {
- bucket = m.begin()->second;
- }
+ perfcounter->inc(l_rgw_put_b, s->obj_size);
+
+ op_ret = do_aws4_auth_completion();
+ if (op_ret < 0) {
+ return;
}
- RGWRados::Bucket target(store, s->bucket_info);
- if (shard_id >= 0) {
- target.set_shard_id(shard_id);
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->obj_size, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
+ return;
}
- RGWRados::Bucket::List list_op(&target);
- list_op.params.prefix = prefix;
- list_op.params.delim = delimiter;
- list_op.params.marker = marker;
- list_op.params.end_marker = end_marker;
- list_op.params.list_versions = list_versions;
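+ // finalize the MD5 of the uploaded data; for a regular PUT the hex digest
+ // becomes the object's ETag and is compared against Content-MD5 / the
+ // supplied ETag below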
+ hash.Final(m);
- op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
- if (op_ret >= 0 && !delimiter.empty()) {
- next_marker = list_op.get_next_marker();
+ if (compressor && compressor->is_compressed()) {
+ bufferlist tmp;
+ RGWCompressionInfo cs_info;
+ cs_info.compression_type = plugin->get_type_name();
+ cs_info.orig_size = s->obj_size;
+ cs_info.compressor_message = compressor->get_compressor_message();
+ cs_info.blocks = move(compressor->get_compression_blocks());
+ encode(cs_info, tmp);
+ attrs[RGW_ATTR_COMPRESSION] = tmp;
+ ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION
+ << " with type=" << cs_info.compression_type
+ << ", orig_size=" << cs_info.orig_size
+ << ", blocks=" << cs_info.blocks.size() << dendl;
}
-}
-int RGWGetBucketLogging::verify_permission()
-{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
- return -EACCES;
+ buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
+
+ etag = calc_md5;
+
+ if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
+ op_ret = -ERR_BAD_DIGEST;
+ return;
}
- return 0;
-}
+ policy.encode(aclbl);
+ emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
-int RGWGetBucketLocation::verify_permission()
-{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
- return -EACCES;
+ if (dlo_manifest) {
+ op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
+ return;
+ }
}
- return 0;
-}
+ if (slo_info) {
+ bufferlist manifest_bl;
+ encode(*slo_info, manifest_bl);
+ emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));
+ }
-int RGWCreateBucket::verify_permission()
-{
- /* This check is mostly needed for S3 that doesn't support account ACL.
- * Swift doesn't allow to delegate any permission to an anonymous user,
- * so it will become an early exit in such case. */
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
+ if (supplied_etag && etag.compare(supplied_etag) != 0) {
+ op_ret = -ERR_UNPROCESSABLE_ENTITY;
+ return;
}
+ bl.append(etag.c_str(), etag.size());
+ emplace_attr(RGW_ATTR_ETAG, std::move(bl));
- if (!verify_user_permission(s, RGW_PERM_WRITE)) {
- return -EACCES;
+ populate_with_generic_attrs(s, attrs);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ if (op_ret < 0) {
+ return;
}
+ encode_delete_at_attr(delete_at, attrs);
+ encode_obj_tags_attr(obj_tags.get(), attrs);
+ rgw_cond_decode_objtags(s, attrs);
- if (s->user->user_id.tenant != s->bucket_tenant) {
- ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
- << " (user_id.tenant=" << s->user->user_id.tenant
- << " requested=" << s->bucket_tenant << ")"
- << dendl;
- return -EACCES;
+ /* Add custom metadata to expose whether an object is an SLO or not.
+ * Appending the attribute must be performed AFTER processing any input
+ * from the user in order to prohibit overwriting. */
+ if (slo_info) {
+ bufferlist slo_userindicator_bl;
+ slo_userindicator_bl.append("True", 4);
+ emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
}
- if (s->user->max_buckets < 0) {
- return -EPERM;
+ if (obj_legal_hold) {
+ bufferlist obj_legal_hold_bl;
+ obj_legal_hold->encode(obj_legal_hold_bl);
+ emplace_attr(RGW_ATTR_OBJECT_LEGAL_HOLD, std::move(obj_legal_hold_bl));
+ }
+ if (obj_retention) {
+ bufferlist obj_retention_bl;
+ obj_retention->encode(obj_retention_bl);
+ emplace_attr(RGW_ATTR_OBJECT_RETENTION, std::move(obj_retention_bl));
}
- if (s->user->max_buckets) {
- RGWUserBuckets buckets;
- string marker;
- bool is_truncated = false;
- op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
- marker, string(), s->user->max_buckets,
- false, &is_truncated);
- if (op_ret < 0) {
- return op_ret;
- }
+ tracepoint(rgw_op, processor_complete_enter, s->req_id.c_str());
+ op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
+ (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
+ (user_data.empty() ? nullptr : &user_data), nullptr, nullptr,
+ s->yield);
+ tracepoint(rgw_op, processor_complete_exit, s->req_id.c_str());
- if ((int)buckets.count() >= s->user->max_buckets) {
- return -ERR_TOO_MANY_BUCKETS;
+ /* produce torrent */
+ if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
+ {
+ torrent.init(s, store);
+ torrent.set_create_date(mtime);
+ op_ret = torrent.complete(y);
+ if (0 != op_ret)
+ {
+ ldpp_dout(this, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
+ return;
}
}
- return 0;
+ // send request to notification manager
+ int ret = res->publish_commit(this, s->obj_size, mtime, etag, s->object->get_instance());
+ if (ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
+ // too late to rollback operation, hence op_ret is not set here
+ }
}
-static int forward_request_to_master(struct req_state *s, obj_version *objv,
- RGWRados *store, bufferlist& in_data,
- JSONParser *jp, req_info *forward_info)
+int RGWPostObj::verify_permission(optional_yield y)
{
- if (!store->rest_master_conn) {
- ldout(s->cct, 0) << "rest connection is invalid" << dendl;
- return -EINVAL;
- }
- ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
- bufferlist response;
- string uid_str = s->user->user_id.to_str();
-#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
- int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
- objv, MAX_REST_RESPONSE, &in_data, &response);
- if (ret < 0)
- return ret;
-
- ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
- if (jp && !jp->parse(response.c_str(), response.length())) {
- ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
- return -EINVAL;
- }
-
return 0;
}
-void RGWCreateBucket::pre_exec()
+void RGWPostObj::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
- map<string, bufferlist>& out_attrs,
- map<string, bufferlist>& out_rmattrs)
-{
- for (const auto& kv : orig_attrs) {
- const string& name = kv.first;
-
- /* Check if the attr is user-defined metadata item. */
- if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
- RGW_ATTR_META_PREFIX) == 0) {
- /* For the objects all existing meta attrs have to be removed. */
- out_rmattrs[name] = kv.second;
- } else if (out_attrs.find(name) == std::end(out_attrs)) {
- out_attrs[name] = kv.second;
- }
- }
-}
-
-/* Fuse resource metadata basing on original attributes in @orig_attrs, set
- * of _custom_ attribute names to remove in @rmattr_names and attributes in
- * @out_attrs. Place results in @out_attrs.
- *
- * NOTE: it's supposed that all special attrs already present in @out_attrs
- * will be preserved without any change. Special attributes are those which
- * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
- * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
-static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
- const set<string>& rmattr_names,
- map<string, bufferlist>& out_attrs)
+void RGWPostObj::execute(optional_yield y)
{
- for (const auto& kv : orig_attrs) {
- const string& name = kv.first;
-
- /* Check if the attr is user-defined metadata item. */
- if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
- RGW_ATTR_META_PREFIX) == 0) {
- /* For the buckets all existing meta attrs are preserved,
- except those that are listed in rmattr_names. */
- if (rmattr_names.find(name) != std::end(rmattr_names)) {
- const auto aiter = out_attrs.find(name);
+ boost::optional<RGWPutObj_Compress> compressor;
+ CompressorRef plugin;
+ char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
- if (aiter != std::end(out_attrs)) {
- out_attrs.erase(aiter);
- }
- } else {
- /* emplace() won't alter the map if the key is already present.
- * This behaviour is fully intensional here. */
- out_attrs.emplace(kv);
- }
- } else if (out_attrs.find(name) == std::end(out_attrs)) {
- out_attrs[name] = kv.second;
- }
+ /* Read in the data from the POST form. */
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ return;
}
-}
-
-static void populate_with_generic_attrs(const req_state * const s,
- map<string, bufferlist>& out_attrs)
-{
- for (const auto& kv : s->generic_attrs) {
- bufferlist& attrbl = out_attrs[kv.first];
- const string& val = kv.second;
- attrbl.clear();
- attrbl.append(val.c_str(), val.size() + 1);
+ op_ret = verify_params();
+ if (op_ret < 0) {
+ return;
}
-}
-
-
-static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
- const std::set<std::string>& rmattr_names,
- RGWQuotaInfo& quota,
- bool * quota_extracted = nullptr)
-{
- bool extracted = false;
- /* Put new limit on max objects. */
- auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
- std::string err;
- if (std::end(add_attrs) != iter) {
- quota.max_objects =
- static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
- if (!err.empty()) {
- return -EINVAL;
+ if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
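+    // evaluate the identity (IAM user), bucket, and session policies for
+    // s3:PutObject on the target object; an explicit Deny from any of them
+    // rejects the upload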
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (identity_policy_res == Effect::Deny) {
+ op_ret = -EACCES;
+ return;
}
- add_attrs.erase(iter);
- extracted = true;
- }
- /* Put new limit on bucket (container) size. */
- iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
- if (iter != add_attrs.end()) {
- quota.max_size =
- static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
- if (!err.empty()) {
- return -EINVAL;
+ rgw::IAM::Effect e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ if (s->iam_policy) {
+ ARN obj_arn(s->object->get_obj());
+ e = s->iam_policy->eval(s->env, *s->auth.identity,
+ rgw::IAM::s3PutObject,
+ obj_arn,
+ princ_type);
}
- add_attrs.erase(iter);
- extracted = true;
- }
-
- for (const auto& name : rmattr_names) {
- /* Remove limit on max objects. */
- if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
- quota.max_objects = -1;
- extracted = true;
+ if (e == Effect::Deny) {
+ op_ret = -EACCES;
+ return;
}
- /* Remove limit on max bucket size. */
- if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
- quota.max_size = -1;
- extracted = true;
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (session_policy_res == Effect::Deny) {
+ op_ret = -EACCES;
+ return;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && e == Effect::Allow)) {
+ op_ret = 0;
+ return;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
+ op_ret = 0;
+ return;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
+ op_ret = 0;
+ return;
+ }
+ }
+ op_ret = -EACCES;
+ return;
+ }
+ if (identity_policy_res == Effect::Pass && e == Effect::Pass && !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ op_ret = -EACCES;
+ return;
}
+ } else if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ op_ret = -EACCES;
+ return;
}
- /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
- quota.check_on_raw = true;
- quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
-
- if (quota_extracted) {
- *quota_extracted = extracted;
+ // make reservation for notification if needed
+ std::unique_ptr<rgw::sal::Notification> res
+ = store->get_notification(s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost);
+ op_ret = res->publish_reserve(this);
+ if (op_ret < 0) {
+ return;
}
- return 0;
-}
-
-
-static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
- const std::set<std::string>& rmattr_names,
- RGWBucketWebsiteConf& ws_conf)
-{
- std::string lstval;
+  /* Start iteration over data fields. It's necessary as Swift's FormPost
+   * can handle multiple files in a single form. */
+ do {
+ char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
+ unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
+ MD5 hash;
+ // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+ hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+ ceph::buffer::list bl, aclbl;
+ int len = 0;
- /* Let's define a mapping between each custom attribute and the memory where
- * attribute's value should be stored. The memory location is expressed by
- * a non-const reference. */
- const auto mapping = {
- std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
- std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
- std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
- std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
- std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
- };
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->content_length, y);
+ if (op_ret < 0) {
+ return;
+ }
- for (const auto& kv : mapping) {
- const char * const key = kv.first;
- auto& target = kv.second;
+ if (supplied_md5_b64) {
+ char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
+ ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
+ op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
+ supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
+      ldpp_dout(this, 15) << "ceph_unarmor ret=" << op_ret << dendl;
+ if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
+ op_ret = -ERR_INVALID_DIGEST;
+ return;
+ }
- auto iter = add_attrs.find(key);
+ buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
+ ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
+ }
- if (std::end(add_attrs) != iter) {
- /* The "target" is a reference to ws_conf. */
- target = iter->second.c_str();
- add_attrs.erase(iter);
+ std::unique_ptr<rgw::sal::Object> obj =
+ s->bucket->get_object(rgw_obj_key(get_current_filename()));
+ if (s->bucket->versioning_enabled()) {
+ obj->gen_rand_obj_instance_name();
}
- if (rmattr_names.count(key)) {
- target = std::string();
+ auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
+ s->yield);
+
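+    // write the current file through an atomic object writer; encryption or
+    // compression filters may be layered on top of it below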
+ std::unique_ptr<rgw::sal::Writer> processor;
+ processor = store->get_atomic_writer(this, s->yield, std::move(obj),
+ s->bucket_owner.get_id(), *s->obj_ctx,
+ &s->dest_placement, 0, s->req_id);
+ op_ret = processor->prepare(s->yield);
+ if (op_ret < 0) {
+ return;
}
- }
- if (! lstval.empty()) {
- ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
- }
-}
+ /* No filters by default. */
+ rgw::sal::DataProcessor *filter = processor.get();
+ std::unique_ptr<rgw::sal::DataProcessor> encrypt;
+ op_ret = get_encrypt_filter(&encrypt, filter);
+ if (op_ret < 0) {
+ return;
+ }
+ if (encrypt != nullptr) {
+ filter = encrypt.get();
+ } else {
+ const auto& compression_type = store->get_zone()->get_params().get_compression_type(
+ s->dest_placement);
+ if (compression_type != "none") {
+ plugin = Compressor::create(s->cct, compression_type);
+ if (!plugin) {
+ ldpp_dout(this, 1) << "Cannot load plugin for compression type "
+ << compression_type << dendl;
+ } else {
+ compressor.emplace(s->cct, plugin, filter);
+ filter = &*compressor;
+ }
+ }
+ }
-void RGWCreateBucket::execute()
-{
- RGWAccessControlPolicy old_policy(s->cct);
- buffer::list aclbl;
- buffer::list corsbl;
- bool existed;
- string bucket_name;
- rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
- rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
- obj_version objv, *pobjv = NULL;
+ bool again;
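+    // read the POST form data in chunks; get_data() sets 'again' while more
+    // data remains for the current part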
+ do {
+ ceph::bufferlist data;
+ len = get_data(data, again);
- op_ret = get_params();
- if (op_ret < 0)
- return;
+ if (len < 0) {
+ op_ret = len;
+ return;
+ }
- if (!store->get_zonegroup().is_master &&
- store->get_zonegroup().api_name != location_constraint) {
- ldout(s->cct, 0) << "location constraint (" << location_constraint << ") doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")" << dendl;
- op_ret = -EINVAL;
- return;
- }
+ if (!len) {
+ break;
+ }
- /* we need to make sure we read bucket info, it's not read before for this
- * specific request */
- RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
- op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
- s->bucket_info, NULL, &s->bucket_attrs);
- if (op_ret < 0 && op_ret != -ENOENT)
- return;
- s->bucket_exists = (op_ret != -ENOENT);
-
- s->bucket_owner.set_id(s->user->user_id);
- s->bucket_owner.set_name(s->user->display_name);
- if (s->bucket_exists) {
- int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
- s->bucket_attrs, &old_policy);
- if (r >= 0) {
- if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
- op_ret = -EEXIST;
+ hash.Update((const unsigned char *)data.c_str(), data.length());
+ op_ret = filter->process(std::move(data), ofs);
+ if (op_ret < 0) {
return;
}
- }
- }
- RGWBucketInfo master_info;
- rgw_bucket *pmaster_bucket;
- uint32_t *pmaster_num_shards;
- real_time creation_time;
+ ofs += len;
+
+ if (ofs > max_len) {
+ op_ret = -ERR_TOO_LARGE;
+ return;
+ }
+ } while (again);
- if (!store->is_meta_master()) {
- JSONParser jp;
- op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
+ // flush
+ op_ret = filter->process({}, ofs);
if (op_ret < 0) {
return;
}
- JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
- JSONDecoder::decode_json("object_ver", objv, &jp);
- JSONDecoder::decode_json("bucket_info", master_info, &jp);
- ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
- ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
- pmaster_bucket= &master_info.bucket;
- creation_time = master_info.creation_time;
- pmaster_num_shards = &master_info.num_shards;
- pobjv = &objv;
- } else {
- pmaster_bucket = NULL;
- pmaster_num_shards = NULL;
- }
+ if (len < min_len) {
+ op_ret = -ERR_TOO_SMALL;
+ return;
+ }
- string zonegroup_id;
+ s->obj_size = ofs;
+ s->object->set_obj_size(ofs);
- if (s->system_request) {
- zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
- if (zonegroup_id.empty()) {
- zonegroup_id = store->get_zonegroup().get_id();
+
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->obj_size, y);
+ if (op_ret < 0) {
+ return;
}
- } else {
- zonegroup_id = store->get_zonegroup().get_id();
- }
-
- if (s->bucket_exists) {
- string selected_placement_rule;
- rgw_bucket bucket;
- bucket.tenant = s->bucket_tenant;
- bucket.name = s->bucket_name;
- op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
- placement_rule,
- &selected_placement_rule, nullptr);
- if (selected_placement_rule != s->bucket_info.placement_rule) {
- op_ret = -EEXIST;
+
+ hash.Final(m);
+ buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
+
+ etag = calc_md5;
+
+ if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
+ op_ret = -ERR_BAD_DIGEST;
return;
}
- }
- /* Encode special metadata first as we're using std::map::emplace under
- * the hood. This method will add the new items only if the map doesn't
- * contain such keys yet. */
- policy.encode(aclbl);
- emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
+ bl.append(etag.c_str(), etag.size());
+ emplace_attr(RGW_ATTR_ETAG, std::move(bl));
- if (has_cors) {
- cors_config.encode(corsbl);
- emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
- }
+ policy.encode(aclbl);
+ emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
- RGWQuotaInfo quota_info;
- const RGWQuotaInfo * pquota_info = nullptr;
- if (need_metadata_upload()) {
- /* It's supposed that following functions WILL NOT change any special
- * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
- rgw_get_request_metadata(s->cct, s->info, attrs, false);
- prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
- populate_with_generic_attrs(s, attrs);
+ const std::string content_type = get_current_content_type();
+ if (! content_type.empty()) {
+ ceph::bufferlist ct_bl;
+ ct_bl.append(content_type.c_str(), content_type.size() + 1);
+ emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
+ }
- op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
+ if (compressor && compressor->is_compressed()) {
+ ceph::bufferlist tmp;
+ RGWCompressionInfo cs_info;
+ cs_info.compression_type = plugin->get_type_name();
+ cs_info.orig_size = s->obj_size;
+ cs_info.compressor_message = compressor->get_compressor_message();
+ cs_info.blocks = move(compressor->get_compression_blocks());
+ encode(cs_info, tmp);
+ emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
+ }
+
+ op_ret = processor->complete(s->obj_size, etag, nullptr, real_time(), attrs,
+ (delete_at ? *delete_at : real_time()),
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ s->yield);
if (op_ret < 0) {
return;
- } else {
-      pquota_info = &quota_info;
}
+ } while (is_next_file_to_upload());
- /* Web site of Swift API. */
- filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
- s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
+ // send request to notification manager
+ int ret = res->publish_commit(this, ofs, s->object->get_mtime(), etag, s->object->get_instance());
+ if (ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
+ // too late to rollback operation, hence op_ret is not set here
}
+}
- s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
- s->bucket.name = s->bucket_name;
- /* Handle updates of the metadata for Swift's object versioning. */
- if (swift_ver_location) {
- s->bucket_info.swift_ver_location = *swift_ver_location;
- s->bucket_info.swift_versioning = (! swift_ver_location->empty());
+void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
+ const set<string>& rmattr_names,
+ map<int, string>& temp_url_keys)
+{
+ map<string, bufferlist>::iterator iter;
+
+ iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
+ if (iter != add_attrs.end()) {
+ temp_url_keys[0] = iter->second.c_str();
+ add_attrs.erase(iter);
}
- op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
- placement_rule, s->bucket_info.swift_ver_location,
- pquota_info, attrs,
- info, pobjv, &ep_objv, creation_time,
- pmaster_bucket, pmaster_num_shards, true);
- /* continue if EEXIST and create_bucket will fail below. this way we can
- * recover from a partial create by retrying it. */
- ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
+ iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
+ if (iter != add_attrs.end()) {
+ temp_url_keys[1] = iter->second.c_str();
+ add_attrs.erase(iter);
+ }
- if (op_ret && op_ret != -EEXIST)
- return;
+ for (const string& name : rmattr_names) {
+ if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
+ temp_url_keys[0] = string();
+ }
+ if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
+ temp_url_keys[1] = string();
+ }
+ }
+}
- existed = (op_ret == -EEXIST);
+int RGWPutMetadataAccount::init_processing(optional_yield y)
+{
+  /* First, go to the base class. At the time of writing the method was
+   * responsible only for initializing the quota. This isn't strictly
+   * necessary here as we are touching metadata only, but the call is
+   * kept for future-proofing. */
+ op_ret = RGWOp::init_processing(y);
+ if (op_ret < 0) {
+ return op_ret;
+ }
- if (existed) {
- /* bucket already existed, might have raced with another bucket creation, or
- * might be partial bucket creation that never completed. Read existing bucket
- * info, verify that the reported bucket owner is the current user.
- * If all is ok then update the user's list of buckets.
- * Otherwise inform client about a name conflict.
- */
- if (info.owner.compare(s->user->user_id) != 0) {
- op_ret = -EEXIST;
- return;
- }
- s->bucket = info.bucket;
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ return op_ret;
}
- op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
- info.creation_time, false);
- if (op_ret && !existed && op_ret != -EEXIST) {
- /* if it exists (or previously existed), don't remove it! */
- op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
- s->bucket.name);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
- << dendl;
- }
- } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
- op_ret = -ERR_BUCKET_EXISTS;
+ op_ret = s->user->read_attrs(this, y);
+ if (op_ret < 0) {
+ return op_ret;
}
+ orig_attrs = s->user->get_attrs();
- if (need_metadata_upload() && existed) {
- /* OK, it looks we lost race with another request. As it's required to
- * handle metadata fusion and upload, the whole operation becomes very
- * similar in nature to PutMetadataBucket. However, as the attrs may
- * changed in the meantime, we have to refresh. */
- short tries = 0;
- do {
- RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
- RGWBucketInfo binfo;
- map<string, bufferlist> battrs;
+ if (has_policy) {
+ bufferlist acl_bl;
+ policy.encode(acl_bl);
+ attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
+ }
- op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
- binfo, nullptr, &battrs);
- if (op_ret < 0) {
- return;
- } else if (binfo.owner.compare(s->user->user_id) != 0) {
- /* New bucket doesn't belong to the account we're operating on. */
- op_ret = -EEXIST;
- return;
- } else {
- s->bucket_info = binfo;
- s->bucket_attrs = battrs;
- }
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+ prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
+ populate_with_generic_attrs(s, attrs);
- attrs.clear();
+  /* Try to extract the TempURL-related stuff now so that verify_permission
+   * can evaluate whether we need FULL_CONTROL or not. */
+ filter_out_temp_url(attrs, rmattr_names, temp_url_keys);
- rgw_get_request_metadata(s->cct, s->info, attrs, false);
- prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
- populate_with_generic_attrs(s, attrs);
- op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
- if (op_ret < 0) {
- return;
- }
+  /* The same applies to quota, except that the client needs to be
+   * a reseller admin. */
+ op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
+ &new_quota_extracted);
+ if (op_ret < 0) {
+ return op_ret;
+ }
- /* Handle updates of the metadata for Swift's object versioning. */
- if (swift_ver_location) {
- s->bucket_info.swift_ver_location = *swift_ver_location;
- s->bucket_info.swift_versioning = (! swift_ver_location->empty());
- }
+ return 0;
+}
- /* Web site of Swift API. */
- filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
- s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
+int RGWPutMetadataAccount::verify_permission(optional_yield y)
+{
+ if (s->auth.identity->is_anonymous()) {
+ return -EACCES;
+ }
- /* This will also set the quota on the bucket. */
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
- &s->bucket_info.objv_tracker);
- } while (op_ret == -ECANCELED && tries++ < 20);
+ if (!verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ return -EACCES;
+ }
+
+ /* Altering TempURL keys requires FULL_CONTROL. */
+ if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
+ return -EPERM;
+ }
+
+  /* We are failing this intentionally to allow a system user/reseller admin
+   * override in rgw_process.cc. This is the way to indicate that a given
+   * RGWOp expects extra privileges. */
+ if (new_quota_extracted) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void RGWPutMetadataAccount::execute(optional_yield y)
+{
+ /* Params have been extracted earlier. See init_processing(). */
+ op_ret = s->user->load_user(this, y);
+ if (op_ret < 0) {
+ return;
+ }
- /* Restore the proper return code. */
- if (op_ret >= 0) {
- op_ret = -ERR_BUCKET_EXISTS;
+ /* Handle the TempURL-related stuff. */
+ if (!temp_url_keys.empty()) {
+ for (auto& pair : temp_url_keys) {
+ s->user->get_info().temp_url_keys[pair.first] = std::move(pair.second);
}
}
+
+ /* Handle the quota extracted at the verify_permission step. */
+ if (new_quota_extracted) {
+ s->user->get_info().user_quota = std::move(new_quota);
+ }
+
+  /* We pass the current (old) user info here to allow the function
+   * to optimize out some operations. */
+ s->user->set_attrs(attrs);
+ op_ret = s->user->store_user(this, y, false, &s->user->get_info());
}
-int RGWDeleteBucket::verify_permission()
+int RGWPutMetadataBucket::verify_permission(optional_yield y)
{
- if (!verify_bucket_permission(s, RGW_PERM_WRITE)) {
+ if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
return -EACCES;
}
return 0;
}
-void RGWDeleteBucket::pre_exec()
+void RGWPutMetadataBucket::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWDeleteBucket::execute()
+void RGWPutMetadataBucket::execute(optional_yield y)
{
- op_ret = -EINVAL;
-
- if (s->bucket_name.empty())
- return;
-
- if (!s->bucket_exists) {
- ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
- op_ret = -ERR_NO_SUCH_BUCKET;
+ op_ret = get_params(y);
+ if (op_ret < 0) {
return;
}
- RGWObjVersionTracker ot;
- ot.read_version = s->bucket_info.ep_objv;
- if (s->system_request) {
- string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
- string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
- if (!tag.empty()) {
- ot.read_version.tag = tag;
- uint64_t ver;
- string err;
- ver = strict_strtol(ver_str.c_str(), 10, &err);
- if (!err.empty()) {
- ldout(s->cct, 0) << "failed to parse ver param" << dendl;
- op_ret = -EINVAL;
- return;
- }
- ot.read_version.ver = ver;
- }
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ if (op_ret < 0) {
+ return;
}
- op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
- if ( op_ret < 0) {
- ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
- }
-
- op_ret = store->check_bucket_empty(s->bucket_info);
- if (op_ret < 0) {
+ if (!placement_rule.empty() &&
+ placement_rule != s->bucket->get_placement_rule()) {
+ op_ret = -EEXIST;
return;
}
- if (!store->is_meta_master()) {
- bufferlist in_data;
- op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
- NULL);
- if (op_ret < 0) {
- if (op_ret == -ENOENT) {
- /* adjust error, we want to return with NoSuchBucket and not
- * NoSuchKey */
- op_ret = -ERR_NO_SUCH_BUCKET;
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ /* Encode special metadata first as we're using std::map::emplace under
+ * the hood. This method will add the new items only if the map doesn't
+ * contain such keys yet. */
+ if (has_policy) {
+ if (s->dialect.compare("swift") == 0) {
+ auto old_policy = \
+ static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
+ auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
+ new_policy->filter_merge(policy_rw_mask, old_policy);
+ policy = *new_policy;
+ }
+ buffer::list bl;
+ policy.encode(bl);
+ emplace_attr(RGW_ATTR_ACL, std::move(bl));
}
- return;
- }
- }
- op_ret = store->delete_bucket(s->bucket_info, ot, false);
+ if (has_cors) {
+ buffer::list bl;
+ cors_config.encode(bl);
+ emplace_attr(RGW_ATTR_CORS, std::move(bl));
+ }
- if (op_ret == -ECANCELED) {
- // lost a race, either with mdlog sync or another delete bucket operation.
- // in either case, we've already called rgw_unlink_bucket()
- op_ret = 0;
- return;
- }
+      /* It's assumed that the following functions WILL NOT change any
+       * special attributes (like RGW_ATTR_ACL) if they are already
+       * present in attrs. */
+ prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
+ populate_with_generic_attrs(s, attrs);
- if (op_ret == 0) {
- op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
- s->bucket.name, false);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
- << dendl;
- }
- }
+      /* According to Swift's behaviour and its container_quota
+       * WSGI middleware implementation: anyone with write permissions
+       * is able to set the bucket quota. This is in contrast to
+       * account quotas, which can be set only by clients holding
+       * reseller admin privileges. */
+ op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket->get_info().quota);
+ if (op_ret < 0) {
+ return op_ret;
+ }
- if (op_ret < 0) {
- return;
- }
+ if (swift_ver_location) {
+ s->bucket->get_info().swift_ver_location = *swift_ver_location;
+ s->bucket->get_info().swift_versioning = (!swift_ver_location->empty());
+ }
+      /* Static website settings of the Swift API. */
+ filter_out_website(attrs, rmattr_names, s->bucket->get_info().website_conf);
+ s->bucket->get_info().has_website = !s->bucket->get_info().website_conf.is_empty();
+ /* Setting attributes also stores the provided bucket info. Due
+ * to this fact, the new quota settings can be serialized with
+ * the same call. */
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ return op_ret;
+ });
}
-int RGWPutObj::verify_permission()
+int RGWPutMetadataObject::verify_permission(optional_yield y)
{
- if (copy_source) {
-
- RGWAccessControlPolicy cs_policy(s->cct);
- map<string, bufferlist> cs_attrs;
- rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
- rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);
-
- rgw_obj obj(cs_bucket, cs_object);
- store->set_atomic(s->obj_ctx, obj);
- store->set_prefetch_data(s->obj_ctx, obj);
-
- /* check source object permissions */
- if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_policy, cs_bucket, cs_object) < 0) {
- return -EACCES;
- }
-
- /* admin request overrides permission checks */
- if (! s->auth.identity->is_admin_of(cs_policy.get_owner().get_id()) &&
- ! cs_policy.verify_permission(*s->auth.identity, s->perm_mask, RGW_PERM_READ)) {
- return -EACCES;
- }
-
- }
-
- if (!verify_bucket_permission(s, RGW_PERM_WRITE)) {
+ // This looks to be something specific to Swift. We could add
+ // operations like swift:PutMetadataObject to the Policy Engine.
+ if (!verify_object_permission_no_policy(this, s, RGW_PERM_WRITE)) {
return -EACCES;
}
return 0;
}
-void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){
-  *_mp = &mp;
+void RGWPutMetadataObject::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
}
-int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
+void RGWPutMetadataObject::execute(optional_yield y)
{
- string oid = obj_str;
- upload_id = s->info.args.get("uploadId");
- if (!oid_rand) {
- mp.init(oid, upload_id);
- } else {
- mp.init(oid, upload_id, *oid_rand);
- }
-
- part_num = s->info.args.get("partNumber");
- if (part_num.empty()) {
- ldout(s->cct, 10) << "part number is empty" << dendl;
- return -EINVAL;
- }
+ rgw_obj target_obj;
+ rgw::sal::Attrs attrs, rmattrs;
- string err;
- uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);
+ s->object->set_atomic(s->obj_ctx);
- if (!err.empty()) {
- ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
- return -EINVAL;
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ return;
}
- string upload_prefix = oid + ".";
-
- if (!oid_rand) {
- upload_prefix.append(upload_id);
- } else {
- upload_prefix.append(*oid_rand);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ if (op_ret < 0) {
+ return;
}
- rgw_obj target_obj;
- target_obj.init(bucket, oid);
-
- manifest.set_prefix(upload_prefix);
-
- manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);
+ /* check if obj exists, read orig attrs */
+ op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, s, &target_obj);
+ if (op_ret < 0) {
+ return;
+ }
- int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
- if (r < 0) {
- return r;
+  /* Check whether the object has expired. Swift API documentation
+   * states that we should return 404 Not Found in such a case. */
+ if (need_object_expiration() && s->object->is_expired()) {
+ op_ret = -ENOENT;
+ return;
}
- cur_obj = manifest_gen.get_cur_obj(store);
- rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
- head_obj.index_hash_source = obj_str;
+ /* Filter currently existing attributes. */
+ prepare_add_del_attrs(s->object->get_attrs(), attrs, rmattrs);
+ populate_with_generic_attrs(s, attrs);
+ encode_delete_at_attr(delete_at, attrs);
- r = prepare_init(store, NULL);
- if (r < 0) {
- return r;
+ if (dlo_manifest) {
+ op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
+ return;
+ }
}
- return 0;
+ op_ret = s->object->set_obj_attrs(this, s->obj_ctx, &attrs, &rmattrs, s->yield, &target_obj);
}
-int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
- const string& etag,
- real_time *mtime, real_time set_mtime,
- map<string, bufferlist>& attrs,
- real_time delete_at,
- const char *if_match,
- const char *if_nomatch, const string *user_data)
+int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
{
- complete_writing_data();
-
- RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
- RGWRados::Object::Write head_obj_op(&op_target);
-
- head_obj_op.meta.set_mtime = set_mtime;
- head_obj_op.meta.mtime = mtime;
- head_obj_op.meta.owner = s->owner.get_id();
- head_obj_op.meta.delete_at = delete_at;
+ RGWSLOInfo slo_info;
+ auto bliter = bl.cbegin();
+ try {
+ decode(slo_info, bliter);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
+ return -EIO;
+ }
- int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
- if (r < 0)
- return r;
+ try {
+ deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
+ new RGWBulkDelete::Deleter(this, store, s));
+ } catch (const std::bad_alloc&) {
+ return -ENOMEM;
+ }
- bufferlist bl;
- RGWUploadPartInfo info;
- string p = "part.";
- bool sorted_omap = is_v2_upload_id(upload_id);
+ list<RGWBulkDelete::acct_path_t> items;
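+  // each SLO segment path has the form "/bucket/object"; split it into a
+  // bulk-delete entry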
+ for (const auto& iter : slo_info.entries) {
+ const string& path_str = iter.path;
- if (sorted_omap) {
- string err;
- int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
- if (!err.empty()) {
- dout(10) << "bad part number specified: " << part_num << dendl;
+ const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
+ if (std::string_view::npos == sep_pos) {
return -EINVAL;
}
- char buf[32];
- snprintf(buf, sizeof(buf), "%08d", part_num_int);
- p.append(buf);
- } else {
- p.append(part_num);
- }
- info.num = atoi(part_num.c_str());
- info.etag = etag;
- info.size = obj_len;
- info.accounted_size = accounted_size;
- info.modified = real_clock::now();
- info.manifest = manifest;
- bool compressed;
- r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
- if (r < 0) {
- dout(1) << "cannot get compression info" << dendl;
- return r;
- }
-
- ::encode(info, bl);
-
- string multipart_meta_obj = mp.get_meta();
+ RGWBulkDelete::acct_path_t path;
- rgw_obj meta_obj;
- meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
- meta_obj.set_in_extra_data(true);
+ path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
+ path.obj_key = url_decode(path_str.substr(sep_pos + 1));
- rgw_raw_obj raw_meta_obj;
+ items.push_back(path);
+ }
- store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);
+ /* Request removal of the manifest object itself. */
+ RGWBulkDelete::acct_path_t path;
+ path.bucket_name = s->bucket_name;
+ path.obj_key = s->object->get_key();
+ items.push_back(path);
- r = store->omap_set(raw_meta_obj, p, bl);
+ int ret = deleter->delete_chunk(items, y);
+ if (ret < 0) {
+ return ret;
+ }
- return r;
+ return 0;
}
-RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart)
+int RGWDeleteObj::verify_permission(optional_yield y)
{
- RGWPutObjProcessor *processor;
+ int op_ret = get_params(y);
+ if (op_ret) {
+ return op_ret;
+ }
- bool multipart = s->info.args.exists("uploadId");
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+
+ if (s->iam_policy || ! s->iam_user_policies.empty() || ! s->session_policies.empty()) {
+ if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) {
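+      // bypass of governance-mode retention was requested on an object-lock
+      // enabled bucket: verify that the identity is actually allowed
+      // s3:BypassGovernanceRetention, otherwise drop the bypass permission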
+ auto r = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key(), s->object->get_name()));
+ if (r == Effect::Deny) {
+ bypass_perm = false;
+ } else if (r == Effect::Pass && s->iam_policy) {
+ ARN obj_arn(ARN(s->bucket->get_key(), s->object->get_name()));
+ r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention, obj_arn);
+ if (r == Effect::Deny) {
+ bypass_perm = false;
+ }
+ } else if (r == Effect::Pass && !s->session_policies.empty()) {
+ r = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key(), s->object->get_name()));
+ if (r == Effect::Deny) {
+ bypass_perm = false;
+ }
+ }
+ }
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ s->object->get_instance().empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ ARN(s->bucket->get_key(), s->object->get_name()));
+ if (identity_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
- uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
+ rgw::IAM::Effect r = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ ARN obj_arn(ARN(s->bucket->get_key(), s->object->get_name()));
+ if (s->iam_policy) {
+ r = s->iam_policy->eval(s->env, *s->auth.identity,
+ s->object->get_instance().empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ obj_arn,
+ princ_type);
+ }
+ if (r == Effect::Deny)
+ return -EACCES;
- if (!multipart) {
- processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
- (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
- (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
- } else {
- processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ s->object->get_instance().empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ obj_arn);
+ if (session_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && r == Effect::Allow)) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
+ return 0;
+ }
+ }
+ return -EACCES;
+ }
+ if (r == Effect::Allow || identity_policy_res == Effect::Allow)
+ return 0;
}
- if (is_multipart) {
- *is_multipart = multipart;
+ if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ return -EACCES;
}
- return processor;
-}
+ if (s->bucket->get_info().mfa_enabled() &&
+ !s->object->get_instance().empty() &&
+ !s->mfa_verified) {
+ ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl;
+ return -ERR_MFA_REQUIRED;
+ }
-void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
-{
- delete processor;
+ return 0;
}
-void RGWPutObj::pre_exec()
+void RGWDeleteObj::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-class RGWPutObj_CB : public RGWGetDataCB
+void RGWDeleteObj::execute(optional_yield y)
{
- RGWPutObj *op;
-public:
- RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
- ~RGWPutObj_CB() override {}
-
- int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
- return op->get_data_cb(bl, bl_ofs, bl_len);
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
}
-};
-
-int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
-{
- bufferlist bl_tmp;
- bl.copy(bl_ofs, bl_len, bl_tmp);
-
- bl_aux.append(bl_tmp);
-
- return bl_len;
-}
-int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
-{
- RGWPutObj_CB cb(this);
- RGWGetDataCB* filter = &cb;
- boost::optional<RGWGetObj_Decompress> decompress;
- std::unique_ptr<RGWGetDataCB> decrypt;
- RGWCompressionInfo cs_info;
- map<string, bufferlist> attrs;
- map<string, bufferlist>::iterator attr_iter;
- int ret = 0;
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ uint64_t obj_size = 0;
+ std::string etag;
+ RGWObjectCtx* obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
+ {
+ RGWObjState* astate = nullptr;
+ bool check_obj_lock = s->object->have_instance() && s->bucket->get_info().obj_lock_enabled();
- uint64_t obj_size;
- int64_t new_ofs, new_end;
+ op_ret = s->object->get_obj_state(this, obj_ctx, &astate, s->yield, true);
+ if (op_ret < 0) {
+ if (need_object_expiration() || multipart_delete) {
+ return;
+ }
- new_ofs = fst;
- new_end = lst;
+ if (check_obj_lock) {
+ /* check if obj exists, read orig attrs */
+ if (op_ret == -ENOENT) {
+            /* object may be a delete marker, skip check_obj_lock */
+ check_obj_lock = false;
+ } else {
+ return;
+ }
+ }
+ } else {
+ obj_size = astate->size;
+ etag = astate->attrset[RGW_ATTR_ETAG].to_str();
+ }
- rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
- rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
+      // ignore return value from get_obj_state in all other cases
+ op_ret = 0;
- RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
- RGWRados::Object::Read read_op(&op_target);
- read_op.params.obj_size = &obj_size;
- read_op.params.attrs = &attrs;
+ if (check_obj_lock) {
+ ceph_assert(astate);
+ int object_lock_response = verify_object_lock(this, astate->attrset, bypass_perm, bypass_governance_mode);
+ if (object_lock_response != 0) {
+ op_ret = object_lock_response;
+ if (op_ret == -EACCES) {
+ s->err.message = "forbidden by object lock";
+ }
+ return;
+ }
+ }
- ret = read_op.prepare();
- if (ret < 0)
- return ret;
+ if (multipart_delete) {
+ if (!astate) {
+ op_ret = -ERR_NOT_SLO_MANIFEST;
+ return;
+ }
- bool need_decompress;
- op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
- if (op_ret < 0) {
- lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
- return -EIO;
- }
+ const auto slo_attr = astate->attrset.find(RGW_ATTR_SLO_MANIFEST);
- bool partial_content = true;
- if (need_decompress)
- {
- obj_size = cs_info.orig_size;
- decompress.emplace(s->cct, &cs_info, partial_content, filter);
- filter = &*decompress;
- }
+ if (slo_attr != astate->attrset.end()) {
+ op_ret = handle_slo_manifest(slo_attr->second, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
+ }
+ } else {
+ op_ret = -ERR_NOT_SLO_MANIFEST;
+ }
- attr_iter = attrs.find(RGW_ATTR_MANIFEST);
- op_ret = this->get_decrypt_filter(&decrypt,
- filter,
- attrs,
- attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
- if (decrypt != nullptr) {
- filter = decrypt.get();
- }
- if (op_ret < 0) {
- return ret;
- }
+ return;
+ }
+ }
- ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
- if (ret < 0)
- return ret;
+ // make reservation for notification if needed
+ const auto versioned_object = s->bucket->versioning_enabled();
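+  // on a versioned bucket, a delete without a version id creates a delete
+  // marker, so pick the matching notification event type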
+ const auto event_type = versioned_object &&
+ s->object->get_instance().empty() ?
+ rgw::notify::ObjectRemovedDeleteMarkerCreated :
+ rgw::notify::ObjectRemovedDelete;
+ std::unique_ptr<rgw::sal::Notification> res
+ = store->get_notification(s->object.get(), s->src_object.get(), s,
+ event_type);
+ op_ret = res->publish_reserve(this);
+ if (op_ret < 0) {
+ return;
+ }
- filter->fixup_range(new_ofs, new_end);
- ret = read_op.iterate(new_ofs, new_end, filter);
+ s->object->set_atomic(s->obj_ctx);
+
+ bool ver_restored = false;
+ op_ret = s->object->swift_versioning_restore(s->obj_ctx, ver_restored, this);
+ if (op_ret < 0) {
+ return;
+ }
- if (ret >= 0)
- ret = filter->flush();
+ if (!ver_restored) {
+ uint64_t epoch = 0;
- bl.claim_append(bl_aux);
+ /* Swift's versioning mechanism hasn't found any previous version of
+ * the object that could be restored. This means we should proceed
+ * with the regular delete path. */
+ op_ret = get_system_versioning_params(s, &epoch, &version_id);
+ if (op_ret < 0) {
+ return;
+ }
- return ret;
-}
+ std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = s->object->get_delete_op(obj_ctx);
+ del_op->params.obj_owner = s->owner;
+ del_op->params.bucket_owner = s->bucket_owner;
+ del_op->params.versioning_status = s->bucket->get_info().versioning_status();
+ del_op->params.unmod_since = unmod_since;
+ del_op->params.high_precision_time = s->system_request;
+ del_op->params.olh_epoch = epoch;
+ del_op->params.marker_version_id = version_id;
-// special handling for compression type = "random" with multipart uploads
-static CompressorRef get_compressor_plugin(const req_state *s,
- const std::string& compression_type)
-{
- if (compression_type != "random") {
- return Compressor::create(s->cct, compression_type);
- }
+ op_ret = del_op->delete_obj(this, y);
+ if (op_ret >= 0) {
+ delete_marker = del_op->result.delete_marker;
+ version_id = del_op->result.version_id;
+ }
- bool is_multipart{false};
- const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
+      /* Check whether the object has expired. Swift API documentation
+       * states that we should return 404 Not Found in such a case. */
+ if (need_object_expiration() && s->object->is_expired()) {
+ op_ret = -ENOENT;
+ return;
+ }
+ }
- if (!is_multipart) {
- return Compressor::create(s->cct, compression_type);
- }
+ if (op_ret == -ECANCELED) {
+ op_ret = 0;
+ }
+ if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
+ op_ret = 0;
+ }
- // use a hash of the multipart upload id so all parts use the same plugin
- const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
- if (alg == Compressor::COMP_ALG_NONE) {
- return nullptr;
+ // send request to notification manager
+ int ret = res->publish_commit(this, obj_size, ceph::real_clock::now(), etag, version_id);
+ if (ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
+ // too late to rollback operation, hence op_ret is not set here
+ }
+ } else {
+ op_ret = -EINVAL;
}
- return Compressor::create(s->cct, alg);
}
-void RGWPutObj::execute()
+bool RGWCopyObj::parse_copy_location(const std::string_view& url_src,
+ string& bucket_name,
+ rgw_obj_key& key,
+ req_state* s)
{
- RGWPutObjProcessor *processor = NULL;
- RGWPutObjDataProcessor *filter = nullptr;
- std::unique_ptr<RGWPutObjDataProcessor> encrypt;
- char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
- char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
- char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
- unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
- MD5 hash;
- bufferlist bl, aclbl, bs;
- int len;
- map<string, string>::iterator iter;
- bool multipart;
-
- off_t fst;
- off_t lst;
- const auto& compression_type = store->get_zone_params().get_compression_type(
- s->bucket_info.placement_rule);
- CompressorRef plugin;
- boost::optional<RGWPutObj_Compress> compressor;
-
- bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
- perfcounter->inc(l_rgw_put);
- op_ret = -EINVAL;
- if (s->object.empty()) {
- goto done;
- }
+ std::string_view name_str;
+ std::string_view params_str;
- if (!s->bucket_exists) {
- op_ret = -ERR_NO_SUCH_BUCKET;
- return;
+ // search for ? before url-decoding so we don't accidentally match %3F
+ size_t pos = url_src.find('?');
+ if (pos == string::npos) {
+ name_str = url_src;
+ } else {
+ name_str = url_src.substr(0, pos);
+ params_str = url_src.substr(pos + 1);
}
- op_ret = get_params();
- if (op_ret < 0) {
- ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
- goto done;
- }
+ if (name_str[0] == '/') // trim leading slash
+ name_str.remove_prefix(1);
- op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
- if (op_ret < 0) {
- ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
- << op_ret << dendl;
- goto done;
- }
+ std::string dec_src = url_decode(name_str);
- if (supplied_md5_b64) {
- need_calc_md5 = true;
+ pos = dec_src.find('/');
+ if (pos == string::npos)
+ return false;
- ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
- op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
- supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
- ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
- if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
- op_ret = -ERR_INVALID_DIGEST;
- goto done;
- }
+ bucket_name = dec_src.substr(0, pos);
+ key.name = dec_src.substr(pos + 1);
- buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
- ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
+ if (key.name.empty()) {
+ return false;
}
- if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
- we also check sizes at the end anyway */
- op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
- user_quota, bucket_quota, s->content_length);
- if (op_ret < 0) {
- ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
- goto done;
- }
- }
+ if (! params_str.empty()) {
+ RGWHTTPArgs args;
+ args.set(std::string(params_str));
+ args.parse(s);
- if (supplied_etag) {
- strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
- supplied_md5[sizeof(supplied_md5) - 1] = '\0';
+ key.instance = args.get("versionId", NULL);
}
- processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
-
- // no filters by default
- filter = processor;
+ return true;
+}
- /* Handle object versioning of Swift API. */
- if (! multipart) {
- rgw_obj obj(s->bucket, s->object);
- op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx),
- s->bucket_owner.get_id(),
- s->bucket_info,
- obj);
- if (op_ret < 0) {
- return;
- }
- }
+int RGWCopyObj::verify_permission(optional_yield y)
+{
+ RGWAccessControlPolicy src_acl(s->cct);
+ boost::optional<Policy> src_policy;
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return op_ret;
- op_ret = processor->prepare(store, NULL);
+ op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
if (op_ret < 0) {
- ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
- << dendl;
- goto done;
+ return op_ret;
}
- fst = copy_source_range_fst;
- lst = copy_source_range_lst;
-
- op_ret = get_encrypt_filter(&encrypt, filter);
+ op_ret = store->get_bucket(this, s->user.get(),
+ rgw_bucket(src_tenant_name,
+ src_bucket_name,
+ s->bucket_instance_id),
+ &src_bucket, y);
if (op_ret < 0) {
- goto done;
- }
- if (encrypt != nullptr) {
- filter = encrypt.get();
- } else {
- //no encryption, we can try compression
- if (compression_type != "none") {
- plugin = get_compressor_plugin(s, compression_type);
- if (!plugin) {
- ldout(s->cct, 1) << "Cannot load plugin for compression type "
- << compression_type << dendl;
- } else {
- compressor.emplace(s->cct, plugin, filter);
- filter = &*compressor;
- }
+ if (op_ret == -ENOENT) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
}
+ return op_ret;
}
- do {
- bufferlist data_in;
- if (fst > lst)
- break;
- if (!copy_source) {
- len = get_data(data_in);
- } else {
- uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
- op_ret = get_data(fst, cur_lst, data_in);
- if (op_ret < 0)
- goto done;
- len = data_in.length();
- s->content_length += len;
- fst += len;
- }
- if (len < 0) {
- op_ret = len;
- goto done;
- }
+ /* This is the only place the bucket is set on src_object */
+ s->src_object->set_bucket(src_bucket.get());
+ /* get buckets info (source and dest) */
+ if (s->local_source && source_zone.empty()) {
+ s->src_object->set_atomic(s->obj_ctx);
+ s->src_object->set_prefetch_data(s->obj_ctx);
- bufferlist &data = data_in;
- if (len && s->aws4_auth_streaming_mode) {
- /* use unwrapped data */
- data = s->aws4_auth->bl;
- len = data.length();
- }
+ rgw_placement_rule src_placement;
- if (need_calc_md5) {
- hash.Update((const byte *)data.c_str(), data.length());
+ /* check source object permissions */
+ op_ret = read_obj_policy(this, store, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class,
+ src_policy, src_bucket.get(), s->src_object.get(), y);
+ if (op_ret < 0) {
+ return op_ret;
}
- /* save data for producing torrent data */
- torrent.save_data(data_in);
+ /* follow up on previous checks that required reading source object head */
+ if (need_to_check_storage_class) {
+ src_placement.inherit_from(src_bucket->get_placement_rule());
- /* do we need this operation to be synchronous? if we're dealing with an object with immutable
- * head, e.g., multipart object we need to make sure we're the first one writing to this object
- */
- bool need_to_wait = (ofs == 0) && multipart;
-
- bufferlist orig_data;
-
- if (need_to_wait) {
- orig_data = data;
+ op_ret = check_storage_class(src_placement);
+ if (op_ret < 0) {
+ return op_ret;
+ }
}
- op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
- if (op_ret < 0) {
- if (!need_to_wait || op_ret != -EEXIST) {
- ldout(s->cct, 20) << "processor->thottle_data() returned ret="
- << op_ret << dendl;
- goto done;
+ /* admin request overrides permission checks */
+ if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
+ if (src_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, src_policy, s->iam_user_policies, s->session_policies);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag);
+
+ ARN obj_arn(s->src_object->get_obj());
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ s->src_object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion,
+ obj_arn);
+ if (identity_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
+ auto e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ if (src_policy) {
+ e = src_policy->eval(s->env, *s->auth.identity,
+ s->src_object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion,
+ obj_arn,
+ princ_type);
+ }
+ if (e == Effect::Deny) {
+ return -EACCES;
+ }
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ s->src_object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion,
+ obj_arn);
+ if (session_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
+ (session_policy_res != Effect::Allow || e != Effect::Allow)) {
+ return -EACCES;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
+ return -EACCES;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
+ return -EACCES;
+ }
+ }
}
- /* need_to_wait == true and op_ret == -EEXIST */
- ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;
-
- /* restore original data */
- data.swap(orig_data);
+ if (identity_policy_res == Effect::Pass && e == Effect::Pass &&
+ !src_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
+ RGW_PERM_READ)) {
+ return -EACCES;
+ }
+        // remove src object tags as they may interfere with policy evaluation of the destination obj
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_remove_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag);
+
+ } else if (!src_acl.verify_permission(this, *s->auth.identity,
+ s->perm_mask,
+ RGW_PERM_READ)) {
+ return -EACCES;
+ }
+ }
+ }
- /* restart processing with different oid suffix */
+ RGWAccessControlPolicy dest_bucket_policy(s->cct);
- dispose_processor(processor);
- processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
- filter = processor;
+ if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
+ or intra region sync */
+ dest_bucket = src_bucket->clone();
+ } else {
+ op_ret = store->get_bucket(this, s->user.get(), dest_tenant_name, dest_bucket_name, &dest_bucket, y);
+ if (op_ret < 0) {
+ if (op_ret == -ENOENT) {
+ ldpp_dout(this, 0) << "ERROR: Destination Bucket not found for user: " << s->user->get_id().to_str() << dendl;
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ }
+ return op_ret;
+ }
+ }
- string oid_rand;
- char buf[33];
- gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
- oid_rand.append(buf);
+ dest_object = dest_bucket->get_object(rgw_obj_key(dest_obj_name));
+ dest_object->set_atomic(s->obj_ctx);
- op_ret = processor->prepare(store, &oid_rand);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
- << op_ret << dendl;
- goto done;
+ /* check dest bucket permissions */
+ op_ret = read_bucket_policy(this, store, s, dest_bucket->get_info(),
+ dest_bucket->get_attrs(),
+ &dest_bucket_policy, dest_bucket->get_key(), y);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+ auto dest_iam_policy = get_iam_policy_from_attr(s->cct, dest_bucket->get_attrs(), dest_bucket->get_tenant());
+ /* admin request overrides permission checks */
+ if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id())){
+ if (dest_iam_policy != boost::none || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
+ //Add destination bucket tags for authorization
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, dest_iam_policy, s->iam_user_policies, s->session_policies);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s, dest_bucket.get());
+
+ rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source);
+ if (md_directive)
+ rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive",
+ *md_directive);
+
+ ARN obj_arn(dest_object->get_obj());
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies,
+ s->env,
+ rgw::IAM::s3PutObject,
+ obj_arn);
+ if (identity_policy_res == Effect::Deny) {
+ return -EACCES;
}
-
- op_ret = get_encrypt_filter(&encrypt, filter);
- if (op_ret < 0) {
- goto done;
+ auto e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ if (dest_iam_policy) {
+ e = dest_iam_policy->eval(s->env, *s->auth.identity,
+ rgw::IAM::s3PutObject,
+ obj_arn,
+ princ_type);
}
- if (encrypt != nullptr) {
- filter = encrypt.get();
- } else {
- if (compressor) {
- compressor.emplace(s->cct, plugin, filter);
- filter = &*compressor;
+ if (e == Effect::Deny) {
+ return -EACCES;
+ }
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env, rgw::IAM::s3PutObject, obj_arn);
+ if (session_policy_res == Effect::Deny) {
+          return -EACCES;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
+              (session_policy_res != Effect::Allow || e != Effect::Allow)) {
+ return -EACCES;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
+ return -EACCES;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
+ return -EACCES;
+ }
}
}
- op_ret = put_data_and_throttle(filter, data, ofs, false);
- if (op_ret < 0) {
- goto done;
+ if (identity_policy_res == Effect::Pass && e == Effect::Pass &&
+ ! dest_bucket_policy.verify_permission(this,
+ *s->auth.identity,
+ s->perm_mask,
+ RGW_PERM_WRITE)){
+ return -EACCES;
}
+ } else if (! dest_bucket_policy.verify_permission(this, *s->auth.identity, s->perm_mask,
+ RGW_PERM_WRITE)) {
+ return -EACCES;
}
- ofs += len;
- } while (len > 0);
+ }
- {
- bufferlist flush;
- op_ret = put_data_and_throttle(filter, flush, ofs, false);
- if (op_ret < 0) {
- goto done;
+ op_ret = init_dest_policy();
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ return 0;
+}
+
+
+int RGWCopyObj::init_common()
+{
+ if (if_mod) {
+ if (parse_time(if_mod, &mod_time) < 0) {
+ op_ret = -EINVAL;
+ return op_ret;
+ }
+ mod_ptr = &mod_time;
+ }
+
+ if (if_unmod) {
+ if (parse_time(if_unmod, &unmod_time) < 0) {
+ op_ret = -EINVAL;
+ return op_ret;
}
+ unmod_ptr = &unmod_time;
}
- if (!chunked_upload &&
- ofs != s->content_length &&
- !s->aws4_auth_streaming_mode) {
- op_ret = -ERR_REQUEST_TIMEOUT;
- goto done;
- }
- s->obj_size = ofs;
+ bufferlist aclbl;
+ dest_policy.encode(aclbl);
+ emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
- perfcounter->inc(l_rgw_put_b, s->obj_size);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+ populate_with_generic_attrs(s, attrs);
- if (s->aws4_auth_needs_complete) {
+ return 0;
+}
- /* complete aws4 auth */
+static void copy_obj_progress_cb(off_t ofs, void *param)
+{
+ RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
+ op->progress_cb(ofs);
+}
- op_ret = RGW_Auth_S3::authorize_aws4_auth_complete(store, s);
- if (op_ret) {
- goto done;
- }
+void RGWCopyObj::progress_cb(off_t ofs)
+{
+ if (!s->cct->_conf->rgw_copy_obj_progress)
+ return;
- s->aws4_auth_needs_complete = false;
+ if (ofs - last_ofs <
+ static_cast<off_t>(s->cct->_conf->rgw_copy_obj_progress_every_bytes)) {
+ return;
+ }
- /* verify signature */
+ send_partial_response(ofs);
- if (s->aws4_auth->signature != s->aws4_auth->new_signature) {
- op_ret = -ERR_SIGNATURE_NO_MATCH;
- ldout(s->cct, 20) << "delayed aws4 auth failed" << dendl;
- goto done;
- }
+ last_ofs = ofs;
+}
- /* authorization ok */
+void RGWCopyObj::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- dout(10) << "v4 auth ok" << dendl;
+void RGWCopyObj::execute(optional_yield y)
+{
+ if (init_common() < 0)
+ return;
- }
- op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
- user_quota, bucket_quota, s->obj_size);
+ // make reservation for notification if needed
+ std::unique_ptr<rgw::sal::Notification> res
+ = store->get_notification(
+ s->object.get(), s->src_object.get(),
+ s, rgw::notify::ObjectCreatedCopy);
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
- ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
- goto done;
+ return;
}
- hash.Final(m);
-
- if (compressor && compressor->is_compressed()) {
- bufferlist tmp;
- RGWCompressionInfo cs_info;
- cs_info.compression_type = plugin->get_type_name();
- cs_info.orig_size = s->obj_size;
- cs_info.blocks = move(compressor->get_compression_blocks());
- ::encode(cs_info, tmp);
- attrs[RGW_ATTR_COMPRESSION] = tmp;
- ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
- << " with type=" << cs_info.compression_type
- << ", orig_size=" << cs_info.orig_size
- << ", blocks=" << cs_info.blocks.size() << dendl;
+ if ( ! version_id.empty()) {
+ dest_object->set_instance(version_id);
+ } else if (dest_bucket->versioning_enabled()) {
+ dest_object->gen_rand_obj_instance_name();
}
- buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
+ s->src_object->set_atomic(s->obj_ctx);
+ dest_object->set_atomic(s->obj_ctx);
- etag = calc_md5;
+ encode_delete_at_attr(delete_at, attrs);
- if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
- op_ret = -ERR_BAD_DIGEST;
- goto done;
+ if (obj_retention) {
+ bufferlist obj_retention_bl;
+ obj_retention->encode(obj_retention_bl);
+ emplace_attr(RGW_ATTR_OBJECT_RETENTION, std::move(obj_retention_bl));
+ }
+ if (obj_legal_hold) {
+ bufferlist obj_legal_hold_bl;
+ obj_legal_hold->encode(obj_legal_hold_bl);
+ emplace_attr(RGW_ATTR_OBJECT_LEGAL_HOLD, std::move(obj_legal_hold_bl));
}
- policy.encode(aclbl);
- emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
-
- if (dlo_manifest) {
- op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
+ uint64_t obj_size = 0;
+ {
+ // get src object size (cached in obj_ctx from verify_permission())
+ RGWObjState* astate = nullptr;
+ op_ret = s->src_object->get_obj_state(this, s->obj_ctx, &astate, s->yield, true);
if (op_ret < 0) {
- ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
- goto done;
+ return;
}
- complete_etag(hash, &etag);
- ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
- }
-
- if (slo_info) {
- bufferlist manifest_bl;
- ::encode(*slo_info, manifest_bl);
- emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));
- hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
- complete_etag(hash, &etag);
- ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
- }
+ /* Check if the src object is cloud-tiered */
+ bufferlist bl;
+ if (astate->get_attr(RGW_ATTR_MANIFEST, bl)) {
+ RGWObjManifest m;
+ decode(m, bl);
+ if (m.get_tier_type() == "cloud-s3") {
+ op_ret = -ERR_INVALID_OBJECT_STATE;
+ ldpp_dout(this, 0) << "ERROR: Cannot copy cloud tiered object. Failing with "
+ << op_ret << dendl;
+ return;
+ }
+ }
- if (supplied_etag && etag.compare(supplied_etag) != 0) {
- op_ret = -ERR_UNPROCESSABLE_ENTITY;
- goto done;
+ obj_size = astate->size;
+
+ if (!s->system_request) { // no quota enforcement for system requests
+ if (astate->accounted_size > static_cast<size_t>(s->cct->_conf->rgw_max_put_size)) {
+ op_ret = -ERR_TOO_LARGE;
+ return;
+ }
+ // enforce quota against the destination bucket owner
+ op_ret = dest_bucket->check_quota(this, user_quota, bucket_quota,
+ astate->accounted_size, y);
+ if (op_ret < 0) {
+ return;
+ }
+ }
}
- bl.append(etag.c_str(), etag.size() + 1);
- emplace_attr(RGW_ATTR_ETAG, std::move(bl));
- populate_with_generic_attrs(s, attrs);
- rgw_get_request_metadata(s->cct, s->info, attrs);
- encode_delete_at_attr(delete_at, attrs);
+ bool high_precision_time = (s->system_request);
- /* Add a custom metadata to expose the information whether an object
- * is an SLO or not. Appending the attribute must be performed AFTER
- * processing any input from user in order to prohibit overwriting. */
- if (slo_info) {
- bufferlist slo_userindicator_bl;
- ::encode("True", slo_userindicator_bl);
- emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
+ /* Handle object versioning of Swift API. In case of copying to remote this
+ * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
+ op_ret = dest_object->swift_versioning_copy(s->obj_ctx, this, s->yield);
+ if (op_ret < 0) {
+ return;
}
- op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
- (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
- (user_data.empty() ? nullptr : &user_data));
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
+ op_ret = s->src_object->copy_object(obj_ctx,
+ s->user.get(),
+ &s->info,
+ source_zone,
+ dest_object.get(),
+ dest_bucket.get(),
+ src_bucket.get(),
+ s->dest_placement,
+ &src_mtime,
+ &mtime,
+ mod_ptr,
+ unmod_ptr,
+ high_precision_time,
+ if_match,
+ if_nomatch,
+ attrs_mod,
+ copy_if_newer,
+ attrs,
+ RGWObjCategory::Main,
+ olh_epoch,
+ delete_at,
+ (version_id.empty() ? NULL : &version_id),
+ &s->req_id, /* use req_id as tag */
+ &etag,
+ copy_obj_progress_cb, (void *)this,
+ this,
+ s->yield);
+
+ // send request to notification manager
+ int ret = res->publish_commit(this, obj_size, mtime, etag, dest_object->get_instance());
+ if (ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
+ // too late to rollback operation, hence op_ret is not set here
+ }
+}
- /* produce torrent */
- if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
- {
- torrent.init(s, store);
- torrent.set_create_date(mtime);
- op_ret = torrent.handle_data();
- if (0 != op_ret)
- {
- ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
- goto done;
+int RGWGetACLs::verify_permission(optional_yield y)
+{
+ bool perm;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ auto iam_action = s->object->get_instance().empty() ?
+ rgw::IAM::s3GetObjectAcl :
+ rgw::IAM::s3GetObjectVersionAcl;
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+ perm = verify_object_permission(this, s, iam_action);
+ } else {
+ if (!s->bucket_exists) {
+ return -ERR_NO_SUCH_BUCKET;
}
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+ perm = verify_bucket_permission(this, s, rgw::IAM::s3GetBucketAcl);
}
+ if (!perm)
+ return -EACCES;
-done:
- dispose_processor(processor);
- perfcounter->tinc(l_rgw_put_lat,
- (ceph_clock_now() - s->time));
+ return 0;
}
-int RGWPostObj::verify_permission()
+void RGWGetACLs::pre_exec()
{
- return 0;
+ rgw_bucket_object_pre_exec(s);
}
-/*
-RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx)
+
+void RGWGetACLs::execute(optional_yield y)
+{
+ stringstream ss;
+ RGWAccessControlPolicy* const acl = \
+ (!rgw::sal::Object::empty(s->object.get()) ? s->object_acl.get() : s->bucket_acl.get());
+ RGWAccessControlPolicy_S3* const s3policy = \
+ static_cast<RGWAccessControlPolicy_S3*>(acl);
+ s3policy->to_xml(ss);
+ acls = ss.str();
+}
+
+
+
+int RGWPutACLs::verify_permission(optional_yield y)
{
- RGWPutObjProcessor *processor;
+ bool perm;
- uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
+ rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
- processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
+ rgw_add_grant_to_iam_environment(s->env, s);
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ auto iam_action = s->object->get_instance().empty() ? rgw::IAM::s3PutObjectAcl : rgw::IAM::s3PutObjectVersionAcl;
+ op_ret = rgw_iam_add_objtags(this, s, true, true);
+ perm = verify_object_permission(this, s, iam_action);
+ } else {
+ op_ret = rgw_iam_add_buckettags(this, s);
+ perm = verify_bucket_permission(this, s, rgw::IAM::s3PutBucketAcl);
+ }
+ if (!perm)
+ return -EACCES;
- return processor;
+ return 0;
}
-void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor)
+int RGWGetLC::verify_permission(optional_yield y)
{
- delete processor;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ bool perm;
+ perm = verify_bucket_permission(this, s, rgw::IAM::s3GetLifecycleConfiguration);
+ if (!perm)
+ return -EACCES;
+
+ return 0;
}
-*/
-void RGWPostObj::pre_exec()
+
+int RGWPutLC::verify_permission(optional_yield y)
{
- rgw_bucket_object_pre_exec(s);
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ bool perm;
+ perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
+ if (!perm)
+ return -EACCES;
+
+ return 0;
}
-void RGWPostObj::execute()
+int RGWDeleteLC::verify_permission(optional_yield y)
{
- RGWPutObjDataProcessor *filter = nullptr;
- boost::optional<RGWPutObj_Compress> compressor;
- CompressorRef plugin;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- /* Read in the data from the POST form. */
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
- op_ret = verify_params();
- if (op_ret < 0) {
- return;
- }
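+  // S3 defines no separate delete action for lifecycle; DeleteBucketLifecycle
+  // is authorized by s3:PutLifecycleConfiguration, matching the check below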
+ bool perm;
+ perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
+ if (!perm)
+ return -EACCES;
- if (!verify_bucket_permission(s, RGW_PERM_WRITE)) {
- op_ret = -EACCES;
- return;
- }
+ return 0;
+}
- /* Start iteration over data fields. It's necessary as Swift's FormPost
- * is capable to handle multiple files in single form. */
- do {
- std::unique_ptr<RGWPutObjDataProcessor> encrypt;
- char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
- unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
- MD5 hash;
- ceph::buffer::list bl, aclbl;
- int len = 0;
+void RGWPutACLs::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- op_ret = store->check_quota(s->bucket_owner.get_id(),
- s->bucket,
- user_quota,
- bucket_quota,
- s->content_length);
- if (op_ret < 0) {
- return;
- }
+void RGWGetLC::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
- s->bucket_info,
- s->bucket,
- get_current_filename(),
- /* part size */
- s->cct->_conf->rgw_obj_stripe_size,
- s->req_id,
- s->bucket_info.versioning_enabled());
- /* No filters by default. */
- filter = &processor;
+void RGWPutLC::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- op_ret = processor.prepare(store, nullptr);
- if (op_ret < 0) {
- return;
- }
+void RGWDeleteLC::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- op_ret = get_encrypt_filter(&encrypt, filter);
- if (op_ret < 0) {
- return;
- }
- if (encrypt != nullptr) {
- filter = encrypt.get();
- } else {
- const auto& compression_type = store->get_zone_params().get_compression_type(
- s->bucket_info.placement_rule);
- if (compression_type != "none") {
- plugin = Compressor::create(s->cct, compression_type);
- if (!plugin) {
- ldout(s->cct, 1) << "Cannot load plugin for compression type "
- << compression_type << dendl;
- } else {
- compressor.emplace(s->cct, plugin, filter);
- filter = &*compressor;
- }
- }
- }
+void RGWPutACLs::execute(optional_yield y)
+{
+ bufferlist bl;
- bool again;
- do {
- ceph::bufferlist data;
- len = get_data(data, again);
+ RGWAccessControlPolicy_S3 *policy = NULL;
+ RGWACLXMLParser_S3 parser(s->cct);
+ RGWAccessControlPolicy_S3 new_policy(s->cct);
+ stringstream ss;
- if (len < 0) {
- op_ret = len;
- return;
- }
+ op_ret = 0; /* XXX redundant? */
- if (!len) {
- break;
- }
+ if (!parser.init()) {
+ op_ret = -EINVAL;
+ return;
+ }
- hash.Update((const byte *)data.c_str(), data.length());
- op_ret = put_data_and_throttle(filter, data, ofs, false);
- ofs += len;
+ RGWAccessControlPolicy* const existing_policy = \
+ (rgw::sal::Object::empty(s->object.get()) ? s->bucket_acl.get() : s->object_acl.get());
- if (ofs > max_len) {
- op_ret = -ERR_TOO_LARGE;
- return;
- }
- } while (again);
+ owner = existing_policy->get_owner();
- {
- bufferlist flush;
- op_ret = put_data_and_throttle(filter, flush, ofs, false);
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ if (op_ret == -ERANGE) {
+ ldpp_dout(this, 4) << "The size of request xml data is larger than the max limitation, data size = "
+ << s->length << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ s->err.message = "The XML you provided was larger than the maximum " +
+ std::to_string(s->cct->_conf->rgw_max_put_param_size) +
+ " bytes allowed.";
}
+ return;
+ }
- if (len < min_len) {
- op_ret = -ERR_TOO_SMALL;
- return;
- }
+ char* buf = data.c_str();
+ ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;
- s->obj_size = ofs;
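+  // a request may carry either a canned ACL header or an ACL document in the
+  // body, but not both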
+ if (!s->canned_acl.empty() && data.length() > 0) {
+ op_ret = -EINVAL;
+ return;
+ }
- op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
- user_quota, bucket_quota, s->obj_size);
- if (op_ret < 0) {
+ if (!s->canned_acl.empty() || s->has_acl_header) {
+ op_ret = get_policy_from_state(store, s, ss);
+ if (op_ret < 0)
return;
- }
- hash.Final(m);
- buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
+ data.clear();
+ data.append(ss.str());
+ }
- etag = calc_md5;
- bl.append(etag.c_str(), etag.size() + 1);
- emplace_attr(RGW_ATTR_ETAG, std::move(bl));
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ op_ret = -EINVAL;
+ return;
+ }
+ policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
+ if (!policy) {
+ op_ret = -EINVAL;
+ return;
+ }
- policy.encode(aclbl);
- emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
+ const RGWAccessControlList& req_acl = policy->get_acl();
+ const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
+#define ACL_GRANTS_MAX_NUM 100
+ int max_num = s->cct->_conf->rgw_acl_grants_max_num;
+ if (max_num < 0) {
+ max_num = ACL_GRANTS_MAX_NUM;
+ }
- const std::string content_type = get_current_content_type();
- if (! content_type.empty()) {
- ceph::bufferlist ct_bl;
- ct_bl.append(content_type.c_str(), content_type.size() + 1);
- emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
- }
+ int grants_num = req_grant_map.size();
+ if (grants_num > max_num) {
+ ldpp_dout(this, 4) << "An acl can have up to " << max_num
+ << " grants, request acl grants num: " << grants_num << dendl;
+ op_ret = -ERR_LIMIT_EXCEEDED;
+ s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
+ + std::to_string(max_num)
+ + " grants allowed in an acl.";
+ return;
+ }
- if (compressor && compressor->is_compressed()) {
- ceph::bufferlist tmp;
- RGWCompressionInfo cs_info;
- cs_info.compression_type = plugin->get_type_name();
- cs_info.orig_size = s->obj_size;
- cs_info.blocks = move(compressor->get_compression_blocks());
- ::encode(cs_info, tmp);
- emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
+ // forward bucket acl requests to meta master zone
+ if ((rgw::sal::Object::empty(s->object.get()))) {
+ bufferlist in_data;
+ // include acl data unless it was generated from a canned_acl
+ if (s->canned_acl.empty()) {
+ in_data.append(data);
}
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+ }
- op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(),
- attrs, (delete_at ? *delete_at : real_time()));
- } while (is_next_file_to_upload());
-}
-
+ if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
+ ldpp_dout(this, 15) << "Old AccessControlPolicy";
+ policy->to_xml(*_dout);
+ *_dout << dendl;
+ }
-void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
- const set<string>& rmattr_names,
- map<int, string>& temp_url_keys)
-{
- map<string, bufferlist>::iterator iter;
+ op_ret = policy->rebuild(this, store, &owner, new_policy, s->err.message);
+ if (op_ret < 0)
+ return;
- iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
- if (iter != add_attrs.end()) {
- temp_url_keys[0] = iter->second.c_str();
- add_attrs.erase(iter);
+ if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
+ ldpp_dout(this, 15) << "New AccessControlPolicy:";
+ new_policy.to_xml(*_dout);
+ *_dout << dendl;
}
- iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
- if (iter != add_attrs.end()) {
- temp_url_keys[1] = iter->second.c_str();
- add_attrs.erase(iter);
+ if (s->bucket_access_conf &&
+ s->bucket_access_conf->block_public_acls() &&
+ new_policy.is_public(this)) {
+ op_ret = -EACCES;
+ return;
}
+ new_policy.encode(bl);
+ map<string, bufferlist> attrs;
- for (const string& name : rmattr_names) {
- if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
- temp_url_keys[0] = string();
- }
- if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
- temp_url_keys[1] = string();
- }
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ s->object->set_atomic(s->obj_ctx);
+ //if instance is empty, we should modify the latest object
+ op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_ACL, bl, s->yield, this);
+ } else {
+ map<string,bufferlist> attrs = s->bucket_attrs;
+ attrs[RGW_ATTR_ACL] = bl;
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ }
+ if (op_ret == -ECANCELED) {
+ op_ret = 0; /* lost a race, but it's ok because acls are immutable */
}
}
-int RGWPutMetadataAccount::init_processing()
+void RGWPutLC::execute(optional_yield y)
{
- /* First, go to the base class. At the time of writing the method was
- * responsible only for initializing the quota. This isn't necessary
- * here as we are touching metadata only. I'm putting this call only
- * for the future. */
- op_ret = RGWOp::init_processing();
- if (op_ret < 0) {
- return op_ret;
- }
+ bufferlist bl;
+
+ RGWLifecycleConfiguration_S3 config(s->cct);
+ RGWXMLParser parser;
+ RGWLifecycleConfiguration_S3 new_config(s->cct);
- op_ret = get_params();
- if (op_ret < 0) {
- return op_ret;
+ content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
+ if (content_md5 == nullptr) {
+ op_ret = -ERR_INVALID_REQUEST;
+ s->err.message = "Missing required header for this request: Content-MD5";
+ ldpp_dout(this, 5) << s->err.message << dendl;
+ return;
}
- op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
- &acct_op_tracker);
- if (op_ret < 0) {
- return op_ret;
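+  // Content-MD5 arrives base64-encoded; decode it here so it can be compared
+  // against the MD5 computed over the request body below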
+ std::string content_md5_bin;
+ try {
+ content_md5_bin = rgw::from_base64(std::string_view(content_md5));
+ } catch (...) {
+ s->err.message = "Request header Content-MD5 contains character "
+ "that is not base64 encoded.";
+ ldpp_dout(this, 5) << s->err.message << dendl;
+ op_ret = -ERR_BAD_DIGEST;
+ return;
}
- if (has_policy) {
- bufferlist acl_bl;
- policy.encode(acl_bl);
- attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
+ if (!parser.init()) {
+ op_ret = -EINVAL;
+ return;
}
- rgw_get_request_metadata(s->cct, s->info, attrs, false);
- prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
- populate_with_generic_attrs(s, attrs);
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
- /* Try extract the TempURL-related stuff now to allow verify_permission
- * evaluate whether we need FULL_CONTROL or not. */
- filter_out_temp_url(attrs, rmattr_names, temp_url_keys);
+ char* buf = data.c_str();
+ ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;
- /* The same with quota except a client needs to be reseller admin. */
- op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
- &new_quota_extracted);
- if (op_ret < 0) {
- return op_ret;
- }
+ MD5 data_hash;
+ // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+ data_hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+ unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
+ data_hash.Update(reinterpret_cast<const unsigned char*>(buf), data.length());
+ data_hash.Final(data_hash_res);
- return 0;
-}
+  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
+    op_ret = -ERR_BAD_DIGEST;
+    s->err.message = "The Content-MD5 you specified did not match what we received.";
+    // log the computed digest in hex rather than as raw bytes
+    char calc_md5_hex[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
+    buf_to_hex(data_hash_res, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5_hex);
+    ldpp_dout(this, 5) << s->err.message
+                       << " Specified content md5: " << content_md5
+                       << ", calculated content md5: " << calc_md5_hex
+                       << dendl;
+    return;
+  }
-int RGWPutMetadataAccount::verify_permission()
-{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
+ if (!parser.parse(buf, data.length(), 1)) {
+ op_ret = -ERR_MALFORMED_XML;
+ return;
}
- if (!verify_user_permission(s, RGW_PERM_WRITE)) {
- return -EACCES;
+ try {
+ RGWXMLDecoder::decode_xml("LifecycleConfiguration", config, &parser);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 5) << "Bad lifecycle configuration: " << err << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
}
- /* Altering TempURL keys requires FULL_CONTROL. */
- if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
- return -EPERM;
+ op_ret = config.rebuild(new_config);
+ if (op_ret < 0)
+ return;
+
+ if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
+ XMLFormatter xf;
+ new_config.dump_xml(&xf);
+ stringstream ss;
+ xf.flush(ss);
+ ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
}
- /* We are failing this intensionally to allow system user/reseller admin
- * override in rgw_process.cc. This is the way to specify a given RGWOp
- * expect extra privileges. */
- if (new_quota_extracted) {
- return -EACCES;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
}
- return 0;
+ op_ret = store->get_rgwlc()->set_bucket_config(s->bucket.get(), s->bucket_attrs, &new_config);
+ if (op_ret < 0) {
+ return;
+ }
+ return;
}
-void RGWPutMetadataAccount::execute()
+void RGWDeleteLC::execute(optional_yield y)
{
- /* Params have been extracted earlier. See init_processing(). */
- RGWUserInfo new_uinfo;
- op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
- &acct_op_tracker);
+ bufferlist data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- /* Handle the TempURL-related stuff. */
- if (!temp_url_keys.empty()) {
- for (auto& pair : temp_url_keys) {
- new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
- }
+ op_ret = store->get_rgwlc()->remove_bucket_config(s->bucket.get(), s->bucket_attrs);
+ if (op_ret < 0) {
+ return;
}
+ return;
+}
- /* Handle the quota extracted at the verify_permission step. */
- if (new_quota_extracted) {
- new_uinfo.user_quota = std::move(new_quota);
+int RGWGetCORS::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
+}
+
+void RGWGetCORS::execute(optional_yield y)
+{
+ op_ret = read_bucket_cors();
+ if (op_ret < 0)
+ return ;
+
+ if (!cors_exist) {
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
+ op_ret = -ERR_NO_CORS_FOUND;
+ return;
}
+}
- /* We are passing here the current (old) user info to allow the function
- * optimize-out some operations. */
- op_ret = rgw_store_user_info(store, new_uinfo, s->user,
- &acct_op_tracker, real_time(), false, &attrs);
+int RGWPutCORS::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
-int RGWPutMetadataBucket::verify_permission()
+void RGWPutCORS::execute(optional_yield y)
{
- if (!verify_bucket_permission(s, RGW_PERM_WRITE)) {
- return -EACCES;
+ rgw_raw_obj obj;
+
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
+
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
}
- return 0;
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ rgw::sal::Attrs attrs(s->bucket_attrs);
+ attrs[RGW_ATTR_CORS] = cors_bl;
+ return s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ });
}
-void RGWPutMetadataBucket::pre_exec()
+int RGWDeleteCORS::verify_permission(optional_yield y)
{
- rgw_bucket_object_pre_exec(s);
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ // No separate delete permission
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
-void RGWPutMetadataBucket::execute()
+void RGWDeleteCORS::execute(optional_yield y)
{
- op_ret = get_params();
+ bufferlist data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- rgw_get_request_metadata(s->cct, s->info, attrs, false);
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ op_ret = read_bucket_cors();
+ if (op_ret < 0)
+ return op_ret;
- if (!placement_rule.empty() &&
- placement_rule != s->bucket_info.placement_rule) {
- op_ret = -EEXIST;
- return;
+ if (!cors_exist) {
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
+ op_ret = -ENOENT;
+ return op_ret;
+ }
+
+ rgw::sal::Attrs attrs(s->bucket_attrs);
+ attrs.erase(RGW_ATTR_CORS);
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket->get_name()
+ << " returned err=" << op_ret << dendl;
+ }
+ return op_ret;
+ });
+}
+
+void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
+ get_cors_response_headers(this, rule, req_hdrs, hdrs, exp_hdrs, max_age);
+}
+
+int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
+ rule = cc->host_name_rule(origin);
+ if (!rule) {
+ ldpp_dout(this, 10) << "There is no cors rule present for " << origin << dendl;
+ return -ENOENT;
}
- /* Encode special metadata first as we're using std::map::emplace under
- * the hood. This method will add the new items only if the map doesn't
- * contain such keys yet. */
- if (has_policy) {
- if (s->dialect.compare("swift") == 0) {
- auto old_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl);
- auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
- new_policy->filter_merge(policy_rw_mask, old_policy);
- policy = *new_policy;
- }
- buffer::list bl;
- policy.encode(bl);
- emplace_attr(RGW_ATTR_ACL, std::move(bl));
+ if (!validate_cors_rule_method(this, rule, req_meth)) {
+ return -ENOENT;
+ }
+
+ if (!validate_cors_rule_header(this, rule, req_hdrs)) {
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+void RGWOptionsCORS::execute(optional_yield y)
+{
+ op_ret = read_bucket_cors();
+ if (op_ret < 0)
+ return;
+
+ origin = s->info.env->get("HTTP_ORIGIN");
+ if (!origin) {
+ ldpp_dout(this, 0) << "Missing mandatory Origin header" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
+ if (!req_meth) {
+ ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl;
+ op_ret = -EINVAL;
+ return;
}
-
- if (has_cors) {
- buffer::list bl;
- cors_config.encode(bl);
- emplace_attr(RGW_ATTR_CORS, std::move(bl));
+ if (!cors_exist) {
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
+ op_ret = -ENOENT;
+ return;
}
-
- /* It's supposed that following functions WILL NOT change any special
- * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
- prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
- populate_with_generic_attrs(s, attrs);
-
- /* According to the Swift's behaviour and its container_quota WSGI middleware
- * implementation: anyone with write permissions is able to set the bucket
- * quota. This stays in contrast to account quotas that can be set only by
- * clients holding reseller admin privileges. */
- op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
- if (op_ret < 0) {
+ req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
+ op_ret = validate_cors_request(&bucket_cors);
+ if (!rule) {
+ origin = req_meth = NULL;
return;
}
+ return;
+}
- if (swift_ver_location) {
- s->bucket_info.swift_ver_location = *swift_ver_location;
- s->bucket_info.swift_versioning = (! swift_ver_location->empty());
- }
+int RGWGetRequestPayment::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
+}
- /* Web site of Swift API. */
- filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
- s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
+void RGWGetRequestPayment::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- /* Setting attributes also stores the provided bucket info. Due to this
- * fact, the new quota settings can be serialized with the same call. */
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
- &s->bucket_info.objv_tracker);
+void RGWGetRequestPayment::execute(optional_yield y)
+{
+ requester_pays = s->bucket->get_info().requester_pays;
}
-int RGWPutMetadataObject::verify_permission()
+int RGWSetRequestPayment::verify_permission(optional_yield y)
{
- if (!verify_object_permission(s, RGW_PERM_WRITE)) {
- return -EACCES;
- }
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- return 0;
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
}
-void RGWPutMetadataObject::pre_exec()
+void RGWSetRequestPayment::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWPutMetadataObject::execute()
+void RGWSetRequestPayment::execute(optional_yield y)
{
- rgw_obj obj(s->bucket, s->object);
- map<string, bufferlist> attrs, orig_attrs, rmattrs;
- store->set_atomic(s->obj_ctx, obj);
-
- op_ret = get_params();
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- rgw_get_request_metadata(s->cct, s->info, attrs);
- /* check if obj exists, read orig attrs */
- op_ret = get_obj_attrs(store, s, obj, orig_attrs);
- if (op_ret < 0) {
- return;
- }
+ op_ret = get_params(y);
- /* Check whether the object has expired. Swift API documentation
- * stands that we should return 404 Not Found in such case. */
- if (need_object_expiration() && object_is_expired(orig_attrs)) {
- op_ret = -ENOENT;
+ if (op_ret < 0)
return;
- }
-
- /* Filter currently existing attributes. */
- prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
- populate_with_generic_attrs(s, attrs);
- encode_delete_at_attr(delete_at, attrs);
- if (dlo_manifest) {
- op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
- return;
- }
+ s->bucket->get_info().requester_pays = requester_pays;
+ op_ret = s->bucket->put_info(this, false, real_time());
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name()
+ << " returned err=" << op_ret << dendl;
+ return;
}
-
- op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
+ s->bucket_attrs = s->bucket->get_attrs();
}
-int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
+int RGWInitMultipart::verify_permission(optional_yield y)
{
- RGWSLOInfo slo_info;
- bufferlist::iterator bliter = bl.begin();
- try {
- ::decode(slo_info, bliter);
- } catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
- return -EIO;
- }
-
- try {
- deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
- new RGWBulkDelete::Deleter(store, s));
- } catch (std::bad_alloc) {
- return -ENOMEM;
- }
-
- list<RGWBulkDelete::acct_path_t> items;
- for (const auto& iter : slo_info.entries) {
- const string& path_str = iter.path;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
- if (string::npos == sep_pos) {
- return -EINVAL;
+ if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
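+    // evaluate identity (user) policies, the bucket policy and any session
+    // policies for s3:PutObject; an explicit Deny from any of them wins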
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (identity_policy_res == Effect::Deny) {
+ return -EACCES;
}
- RGWBulkDelete::acct_path_t path;
-
- string bucket_name;
- url_decode(path_str.substr(1, sep_pos - 1), bucket_name);
-
- string obj_name;
- url_decode(path_str.substr(sep_pos + 1), obj_name);
-
- path.bucket_name = bucket_name;
- path.obj_key = obj_name;
-
- items.push_back(path);
- }
-
- /* Request removal of the manifest object itself. */
- RGWBulkDelete::acct_path_t path;
- path.bucket_name = s->bucket_name;
- path.obj_key = s->object;
- items.push_back(path);
+ rgw::IAM::Effect e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ ARN obj_arn(s->object->get_obj());
+ if (s->iam_policy) {
+ e = s->iam_policy->eval(s->env, *s->auth.identity,
+ rgw::IAM::s3PutObject,
+ obj_arn,
+ princ_type);
+ }
+ if (e == Effect::Deny) {
+ return -EACCES;
+ }
- int ret = deleter->delete_chunk(items);
- if (ret < 0) {
- return ret;
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (session_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && e == Effect::Allow)) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
+ return 0;
+ }
+ }
+ return -EACCES;
+ }
+ if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
+ return 0;
+ }
}
- return 0;
-}
-
-int RGWDeleteObj::verify_permission()
-{
- if (!verify_bucket_permission(s, RGW_PERM_WRITE)) {
+ if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
return -EACCES;
}
return 0;
}
-void RGWDeleteObj::pre_exec()
+void RGWInitMultipart::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWDeleteObj::execute()
+void RGWInitMultipart::execute(optional_yield y)
{
- if (!s->bucket_exists) {
- op_ret = -ERR_NO_SUCH_BUCKET;
+ bufferlist aclbl, tracebl;
+ rgw::sal::Attrs attrs;
+
+ if (get_params(y) < 0)
return;
- }
- op_ret = get_params();
- if (op_ret < 0) {
+ if (rgw::sal::Object::empty(s->object.get()))
return;
- }
- rgw_obj obj(s->bucket, s->object);
- map<string, bufferlist> attrs;
+ if (multipart_trace) {
+ tracing::encode(multipart_trace->GetContext(), tracebl);
+ attrs[RGW_ATTR_TRACE] = tracebl;
+ }
+ policy.encode(aclbl);
+ attrs[RGW_ATTR_ACL] = aclbl;
- if (!s->object.empty()) {
- if (need_object_expiration() || multipart_delete) {
- /* check if obj exists, read orig attrs */
- op_ret = get_obj_attrs(store, s, obj, attrs);
- if (op_ret < 0) {
- return;
- }
- }
+ populate_with_generic_attrs(s, attrs);
- if (multipart_delete) {
- const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);
+ /* select encryption mode */
+ op_ret = prepare_encryption(attrs);
+ if (op_ret != 0)
+ return;
- if (slo_attr != attrs.end()) {
- op_ret = handle_slo_manifest(slo_attr->second);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
- }
- } else {
- op_ret = -ERR_NOT_SLO_MANIFEST;
- }
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ if (op_ret < 0) {
+ return;
+ }
- return;
- }
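+  // create the upload through the SAL layer; on success the assigned upload id
+  // is recorded and returned to the client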
+ std::unique_ptr<rgw::sal::MultipartUpload> upload;
+ upload = s->bucket->get_multipart_upload(s->object->get_name(),
+ upload_id);
+ op_ret = upload->init(this, s->yield, s->obj_ctx, s->owner, s->dest_placement, attrs);
- RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
- obj_ctx->obj.set_atomic(obj);
+ if (op_ret == 0) {
+ upload_id = upload->get_upload_id();
+ }
+ s->trace->SetAttribute(tracing::rgw::UPLOAD_ID, upload_id);
+ multipart_trace->UpdateName(tracing::rgw::MULTIPART + upload_id);
- bool ver_restored = false;
- op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
- s->bucket_info, obj, ver_restored);
- if (op_ret < 0) {
- return;
- }
+}
- if (!ver_restored) {
- /* Swift's versioning mechanism hasn't found any previous version of
- * the object that could be restored. This means we should proceed
- * with the regular delete path. */
- RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
- RGWRados::Object::Delete del_op(&del_target);
+int RGWCompleteMultipart::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
- &del_op.params.marker_version_id);
- if (op_ret < 0) {
- return;
- }
+ if (s->iam_policy || ! s->iam_user_policies.empty() || ! s->session_policies.empty()) {
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (identity_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
- del_op.params.bucket_owner = s->bucket_owner.get_id();
- del_op.params.versioning_status = s->bucket_info.versioning_status();
- del_op.params.obj_owner = s->owner;
- del_op.params.unmod_since = unmod_since;
- del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */
+ rgw::IAM::Effect e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ rgw::ARN obj_arn(s->object->get_obj());
+ if (s->iam_policy) {
+ e = s->iam_policy->eval(s->env, *s->auth.identity,
+ rgw::IAM::s3PutObject,
+ obj_arn,
+ princ_type);
+ }
+ if (e == Effect::Deny) {
+ return -EACCES;
+ }
- op_ret = del_op.delete_obj();
- if (op_ret >= 0) {
- delete_marker = del_op.result.delete_marker;
- version_id = del_op.result.version_id;
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (session_policy_res == Effect::Deny) {
+ return -EACCES;
}
-
- /* Check whether the object has expired. Swift API documentation
- * stands that we should return 404 Not Found in such case. */
- if (need_object_expiration() && object_is_expired(attrs)) {
- op_ret = -ENOENT;
- return;
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && e == Effect::Allow)) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
+ return 0;
+ }
}
+ return -EACCES;
}
-
- if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
- op_ret = 0;
+ if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
+ return 0;
}
- } else {
- op_ret = -EINVAL;
- }
-}
-
-
-bool RGWCopyObj::parse_copy_location(const string& url_src, string& bucket_name, rgw_obj_key& key)
-{
- string name_str;
- string params_str;
-
- size_t pos = url_src.find('?');
- if (pos == string::npos) {
- name_str = url_src;
- } else {
- name_str = url_src.substr(0, pos);
- params_str = url_src.substr(pos + 1);
}
- string dec_src;
-
- url_decode(name_str, dec_src);
- const char *src = dec_src.c_str();
-
- if (*src == '/') ++src;
-
- string str(src);
-
- pos = str.find('/');
- if (pos ==string::npos)
- return false;
-
- bucket_name = str.substr(0, pos);
- key.name = str.substr(pos + 1);
-
- if (key.name.empty()) {
- return false;
+ if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ return -EACCES;
}
- if (!params_str.empty()) {
- RGWHTTPArgs args;
- args.set(params_str);
- args.parse();
-
- key.instance = args.get("versionId", NULL);
- }
+ return 0;
+}
- return true;
+void RGWCompleteMultipart::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
}
-int RGWCopyObj::verify_permission()
+void RGWCompleteMultipart::execute(optional_yield y)
{
- RGWAccessControlPolicy src_policy(s->cct);
- op_ret = get_params();
- if (op_ret < 0)
- return op_ret;
+ RGWMultiCompleteUpload *parts;
+ RGWMultiXMLParser parser;
+ std::unique_ptr<rgw::sal::MultipartUpload> upload;
+ off_t ofs = 0;
+ std::unique_ptr<rgw::sal::Object> meta_obj;
+ std::unique_ptr<rgw::sal::Object> target_obj;
+ uint64_t olh_epoch = 0;
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
if (op_ret < 0) {
- return op_ret;
+ return;
}
- map<string, bufferlist> src_attrs;
-
- RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
- if (s->bucket_instance_id.empty()) {
- op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
- } else {
- /* will only happen in intra region sync where the source and dest bucket is the same */
- op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
- }
- if (op_ret < 0) {
- if (op_ret == -ENOENT) {
- op_ret = -ERR_NO_SUCH_BUCKET;
- }
- return op_ret;
+ if (!data.length()) {
+ op_ret = -ERR_MALFORMED_XML;
+ return;
}
- src_bucket = src_bucket_info.bucket;
+ if (!parser.init()) {
+ op_ret = -EIO;
+ return;
+ }
- /* get buckets info (source and dest) */
- if (s->local_source && source_zone.empty()) {
- rgw_obj src_obj(src_bucket, src_object);
- store->set_atomic(s->obj_ctx, src_obj);
- store->set_prefetch_data(s->obj_ctx, src_obj);
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
- /* check source object permissions */
- op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_policy,
- src_bucket, src_object);
- if (op_ret < 0) {
- return op_ret;
- }
+ parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
+ if (!parts || parts->parts.empty()) {
+ // CompletedMultipartUpload is incorrect but some versions of some libraries use it, see PR #41700
+ parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompletedMultipartUpload"));
+ }
- /* admin request overrides permission checks */
- if (! s->auth.identity->is_admin_of(src_policy.get_owner().get_id()) &&
- ! src_policy.verify_permission(*s->auth.identity, s->perm_mask,
- RGW_PERM_READ)) {
- return -EACCES;
- }
+ if (!parts || parts->parts.empty()) {
+ op_ret = -ERR_MALFORMED_XML;
+ return;
}
- RGWAccessControlPolicy dest_bucket_policy(s->cct);
- map<string, bufferlist> dest_attrs;
- if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
- or intra region sync */
- dest_bucket_info = src_bucket_info;
- dest_attrs = src_attrs;
- } else {
- op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
- dest_bucket_info, nullptr, &dest_attrs);
- if (op_ret < 0) {
- if (op_ret == -ENOENT) {
- op_ret = -ERR_NO_SUCH_BUCKET;
- }
- return op_ret;
- }
+ if ((int)parts->parts.size() >
+ s->cct->_conf->rgw_multipart_part_upload_limit) {
+ op_ret = -ERANGE;
+ return;
}
- dest_bucket = dest_bucket_info.bucket;
+ upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id);
- rgw_obj dest_obj(dest_bucket, dest_object);
- store->set_atomic(s->obj_ctx, dest_obj);
+ RGWCompressionInfo cs_info;
+ bool compressed = false;
+ uint64_t accounted_size = 0;
- /* check dest bucket permissions */
- op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
- &dest_bucket_policy, dest_bucket);
+ list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */
+
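+  // the upload's meta object carries the upload's attributes and serves as the
+  // lock target that serializes concurrent completion attempts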
+ meta_obj = upload->get_meta_obj();
+ meta_obj->set_in_extra_data(true);
+ meta_obj->set_hash_source(s->object->get_name());
+
+ /*take a cls lock on meta_obj to prevent racing completions (or retries)
+ from deleting the parts*/
+ int max_lock_secs_mp =
+ s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
+ utime_t dur(max_lock_secs_mp, 0);
+
+ serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart");
+ op_ret = serializer->try_lock(this, dur, y);
if (op_ret < 0) {
- return op_ret;
+ ldpp_dout(this, 0) << "failed to acquire lock" << dendl;
+ if (op_ret == -ENOENT && check_previously_completed(parts)) {
+ ldpp_dout(this, 1) << "NOTICE: This multipart completion is already completed" << dendl;
+ op_ret = 0;
+ return;
+ }
+ op_ret = -ERR_INTERNAL_ERROR;
+ s->err.message = "This multipart completion is already in progress";
+ return;
}
- /* admin request overrides permission checks */
- if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
- ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
- RGW_PERM_WRITE)) {
- return -EACCES;
+ op_ret = meta_obj->get_obj_attrs(s->obj_ctx, s->yield, this);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
+ << " ret=" << op_ret << dendl;
+ return;
}
+ s->trace->SetAttribute(tracing::rgw::UPLOAD_ID, upload_id);
+ jspan_context trace_ctx(false, false);
+ extract_span_context(meta_obj->get_attrs(), trace_ctx);
+ multipart_trace = tracing::rgw::tracer.add_span(name(), trace_ctx);
+
- op_ret = init_dest_policy();
+ // make reservation for notification if needed
+ std::unique_ptr<rgw::sal::Notification> res
+ = store->get_notification(meta_obj.get(), nullptr, s, rgw::notify::ObjectCreatedCompleteMultipartUpload, &s->object->get_name());
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
- return op_ret;
+ return;
}
- return 0;
-}
-
-
-int RGWCopyObj::init_common()
-{
- if (if_mod) {
- if (parse_time(if_mod, &mod_time) < 0) {
- op_ret = -EINVAL;
- return op_ret;
+ target_obj = s->bucket->get_object(rgw_obj_key(s->object->get_name()));
+ if (s->bucket->versioning_enabled()) {
+ if (!version_id.empty()) {
+ target_obj->set_instance(version_id);
+ } else {
+ target_obj->gen_rand_obj_instance_name();
+ version_id = target_obj->get_instance();
}
- mod_ptr = &mod_time;
}
+ target_obj->set_attrs(meta_obj->get_attrs());
- if (if_unmod) {
- if (parse_time(if_unmod, &unmod_time) < 0) {
- op_ret = -EINVAL;
- return op_ret;
- }
- unmod_ptr = &unmod_time;
+ op_ret = upload->complete(this, y, s->cct, parts->parts, remove_objs, accounted_size, compressed, cs_info, ofs, s->req_id, s->owner, olh_epoch, target_obj.get(), s->obj_ctx);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: upload complete failed ret=" << op_ret << dendl;
+ return;
}
- bufferlist aclbl;
- dest_policy.encode(aclbl);
- emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
-
- rgw_get_request_metadata(s->cct, s->info, attrs);
- populate_with_generic_attrs(s, attrs);
-
- return 0;
-}
+ // remove the upload meta object ; the meta object is not versioned
+ // when the bucket is, as that would add an unneeded delete marker
+ int r = meta_obj->delete_object(this, s->obj_ctx, y, true /* prevent versioning */);
+ if (r >= 0) {
+ /* serializer's exclusive lock is released */
+ serializer->clear_locked();
+ } else {
+ ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl;
+ }
-static void copy_obj_progress_cb(off_t ofs, void *param)
-{
- RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
- op->progress_cb(ofs);
-}
+ // send request to notification manager
+ int ret = res->publish_commit(this, ofs, target_obj->get_mtime(), etag, target_obj->get_instance());
+ if (ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
+ // too late to rollback operation, hence op_ret is not set here
+ }
+} // RGWCompleteMultipart::execute
-void RGWCopyObj::progress_cb(off_t ofs)
+bool RGWCompleteMultipart::check_previously_completed(const RGWMultiCompleteUpload* parts)
{
- if (!s->cct->_conf->rgw_copy_obj_progress)
- return;
-
- if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
- return;
+ // re-calculate the etag from the parts and compare to the existing object
+ int ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this);
+ if (ret < 0) {
+ ldpp_dout(this, 0) << __func__ << "() ERROR: get_obj_attrs() returned ret=" << ret << dendl;
+ return false;
+ }
+ rgw::sal::Attrs sattrs = s->object->get_attrs();
+ string oetag = sattrs[RGW_ATTR_ETAG].to_str();
- send_partial_response(ofs);
+ MD5 hash;
+ // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+ hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
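+  // the multipart etag is the MD5 of the concatenated binary part etags,
+  // suffixed with "-<number of parts>"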
+ for (const auto& [index, part] : parts->parts) {
+ std::string partetag = rgw_string_unquote(part);
+ char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
+ hex_to_buf(partetag.c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE);
+ hash.Update((const unsigned char *)petag, sizeof(petag));
+ ldpp_dout(this, 20) << __func__ << "() re-calculating multipart etag: part: "
+ << index << ", etag: " << partetag << dendl;
+ }
+
+ unsigned char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
+ char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
+ hash.Final(final_etag);
+ buf_to_hex(final_etag, CEPH_CRYPTO_MD5_DIGESTSIZE, final_etag_str);
+ snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
+ "-%lld", (long long)parts->parts.size());
- last_ofs = ofs;
+ if (oetag.compare(final_etag_str) != 0) {
+ ldpp_dout(this, 1) << __func__ << "() NOTICE: etag mismatch: object etag:"
+ << oetag << ", re-calculated etag:" << final_etag_str << dendl;
+ return false;
+ }
+ ldpp_dout(this, 5) << __func__ << "() object etag and re-calculated etag match, etag: " << oetag << dendl;
+ return true;
}
-void RGWCopyObj::pre_exec()
+void RGWCompleteMultipart::complete()
{
- rgw_bucket_object_pre_exec(s);
+  /* release the exclusive lock if it is still held */
+ if (unlikely(serializer && serializer->locked)) {
+ int r = serializer->unlock();
+ if (r < 0) {
+ ldpp_dout(this, 0) << "WARNING: failed to unlock " << serializer->oid << dendl;
+ }
+ }
+ send_response();
}
-void RGWCopyObj::execute()
+int RGWAbortMultipart::verify_permission(optional_yield y)
{
- if (init_common() < 0)
- return;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- rgw_obj src_obj(src_bucket, src_object);
- rgw_obj dst_obj(dest_bucket, dest_object);
-
- RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
- obj_ctx.obj.set_atomic(src_obj);
- obj_ctx.obj.set_atomic(dst_obj);
+ if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3AbortMultipartUpload,
+ s->object->get_obj());
+ if (identity_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
- encode_delete_at_attr(delete_at, attrs);
+ rgw::IAM::Effect e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ ARN obj_arn(s->object->get_obj());
+ if (s->iam_policy) {
+ e = s->iam_policy->eval(s->env, *s->auth.identity,
+ rgw::IAM::s3AbortMultipartUpload,
+ obj_arn, princ_type);
+ }
- bool high_precision_time = (s->system_request);
+ if (e == Effect::Deny) {
+ return -EACCES;
+ }
- /* Handle object versioning of Swift API. In case of copying to remote this
- * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
- op_ret = store->swift_versioning_copy(obj_ctx,
- dest_bucket_info.owner,
- dest_bucket_info,
- dst_obj);
- if (op_ret < 0) {
- return;
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3PutObject,
+ s->object->get_obj());
+ if (session_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && e == Effect::Allow)) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
+ return 0;
+ }
+ }
+ return -EACCES;
+ }
+ if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
+ return 0;
+ }
}
- op_ret = store->copy_obj(obj_ctx,
- s->user->user_id,
- client_id,
- op_id,
- &s->info,
- source_zone,
- dst_obj,
- src_obj,
- dest_bucket_info,
- src_bucket_info,
- &src_mtime,
- &mtime,
- mod_ptr,
- unmod_ptr,
- high_precision_time,
- if_match,
- if_nomatch,
- attrs_mod,
- copy_if_newer,
- attrs, RGW_OBJ_CATEGORY_MAIN,
- olh_epoch,
- (delete_at ? *delete_at : real_time()),
- (version_id.empty() ? NULL : &version_id),
- &s->req_id, /* use req_id as tag */
- &etag,
- &s->err,
- copy_obj_progress_cb, (void *)this
- );
-}
-
-int RGWGetACLs::verify_permission()
-{
- bool perm;
- if (!s->object.empty()) {
- perm = verify_object_permission(s, RGW_PERM_READ_ACP);
- } else {
- perm = verify_bucket_permission(s, RGW_PERM_READ_ACP);
- }
- if (!perm)
+ if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
return -EACCES;
+ }
return 0;
}
-void RGWGetACLs::pre_exec()
+void RGWAbortMultipart::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWGetACLs::execute()
+void RGWAbortMultipart::execute(optional_yield y)
{
- stringstream ss;
- RGWAccessControlPolicy *acl = (!s->object.empty() ? s->object_acl : s->bucket_acl);
- RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(acl);
- s3policy->to_xml(ss);
- acls = ss.str();
-}
+ op_ret = -EINVAL;
+ string upload_id;
+ upload_id = s->info.args.get("uploadId");
+ std::unique_ptr<rgw::sal::Object> meta_obj;
+ std::unique_ptr<rgw::sal::MultipartUpload> upload;
+ if (upload_id.empty() || rgw::sal::Object::empty(s->object.get()))
+ return;
+ upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id);
+ RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
-int RGWPutACLs::verify_permission()
-{
- bool perm;
- if (!s->object.empty()) {
- perm = verify_object_permission(s, RGW_PERM_WRITE_ACP);
- } else {
- perm = verify_bucket_permission(s, RGW_PERM_WRITE_ACP);
+ jspan_context trace_ctx(false, false);
+ if (tracing::rgw::tracer.is_enabled()) {
+ // read meta object attributes for trace info
+ meta_obj = upload->get_meta_obj();
+ meta_obj->set_in_extra_data(true);
+ meta_obj->get_obj_attrs(obj_ctx, s->yield, this);
+ extract_span_context(meta_obj->get_attrs(), trace_ctx);
}
- if (!perm)
- return -EACCES;
+ multipart_trace = tracing::rgw::tracer.add_span(name(), trace_ctx);
- return 0;
+ op_ret = upload->abort(this, s->cct, obj_ctx);
}
-int RGWGetLC::verify_permission()
+int RGWListMultipart::verify_permission(optional_yield y)
{
- bool perm;
- perm = verify_bucket_permission(s, RGW_PERM_READ_ACP);
- if (!perm)
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+
+ if (!verify_object_permission(this, s, rgw::IAM::s3ListMultipartUploadParts))
return -EACCES;
return 0;
}
-int RGWPutLC::verify_permission()
+void RGWListMultipart::pre_exec()
{
- bool perm;
- perm = verify_bucket_permission(s, RGW_PERM_WRITE_ACP);
- if (!perm)
- return -EACCES;
-
- return 0;
+ rgw_bucket_object_pre_exec(s);
}
-int RGWDeleteLC::verify_permission()
+void RGWListMultipart::execute(optional_yield y)
{
- bool perm;
- perm = verify_bucket_permission(s, RGW_PERM_WRITE_ACP);
- if (!perm)
- return -EACCES;
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
- return 0;
-}
+ upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id);
-void RGWPutACLs::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
-}
+ rgw::sal::Attrs attrs;
+ op_ret = upload->get_info(this, s->yield, s->obj_ctx, &placement, &attrs);
+ /* decode policy */
+ map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_ACL);
+ if (iter != attrs.end()) {
+ auto bliter = iter->second.cbegin();
+ try {
+ policy.decode(bliter);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ op_ret = -EIO;
+ }
+ }
+ if (op_ret < 0)
+ return;
-void RGWGetLC::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
+ op_ret = upload->list_parts(this, s->cct, max_parts, marker, NULL, &truncated);
}
-void RGWPutLC::pre_exec()
+int RGWListBucketMultiparts::verify_permission(optional_yield y)
{
- rgw_bucket_object_pre_exec(s);
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this,
+ s,
+ rgw::IAM::s3ListBucketMultipartUploads))
+ return -EACCES;
+
+ return 0;
}
-void RGWDeleteLC::pre_exec()
+void RGWListBucketMultiparts::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWPutACLs::execute()
+void RGWListBucketMultiparts::execute(optional_yield y)
{
- bufferlist bl;
-
- RGWAccessControlPolicy_S3 *policy = NULL;
- RGWACLXMLParser_S3 parser(s->cct);
- RGWAccessControlPolicy_S3 new_policy(s->cct);
- stringstream ss;
- char *new_data = NULL;
- rgw_obj obj;
-
- op_ret = 0; /* XXX redundant? */
-
- if (!parser.init()) {
- op_ret = -EINVAL;
- return;
- }
-
-
- RGWAccessControlPolicy *existing_policy = (s->object.empty() ? s->bucket_acl : s->object_acl);
-
- owner = existing_policy->get_owner();
-
- op_ret = get_params();
+ op_ret = get_params(y);
if (op_ret < 0)
return;
- ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;
+ if (s->prot_flags & RGW_REST_SWIFT) {
+ string path_args;
+ path_args = s->info.args.get("path");
+ if (!path_args.empty()) {
+ if (!delimiter.empty() || !prefix.empty()) {
+ op_ret = -EINVAL;
+ return;
+ }
+ prefix = path_args;
+ delimiter="/";
+ }
+ }
- if (!s->canned_acl.empty() && len) {
- op_ret = -EINVAL;
+ op_ret = s->bucket->list_multiparts(this, prefix, marker_meta,
+ delimiter, max_uploads, uploads,
+ &common_prefixes, &is_truncated);
+ if (op_ret < 0) {
return;
}
- if (!s->canned_acl.empty() || s->has_acl_header) {
- op_ret = get_policy_from_state(store, s, ss);
- if (op_ret < 0)
- return;
-
- new_data = strdup(ss.str().c_str());
- free(data);
- data = new_data;
- len = ss.str().size();
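+  // remember the last listed upload as the continuation marker for the next page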
+ if (!uploads.empty()) {
+ next_marker_key = uploads.back()->get_key();
+ next_marker_upload_id = uploads.back()->get_upload_id();
}
+}
- if (!parser.parse(data, len, 1)) {
- op_ret = -EINVAL;
- return;
+void RGWGetHealthCheck::execute(optional_yield y)
+{
+ if (!g_conf()->rgw_healthcheck_disabling_path.empty() &&
+ (::access(g_conf()->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
+ /* Disabling path specified & existent in the filesystem. */
+ op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
+ } else {
+ op_ret = 0; /* 200 OK */
}
- policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
- if (!policy) {
- op_ret = -EINVAL;
- return;
+}
+
+int RGWDeleteMultiObj::verify_permission(optional_yield y)
+{
+ int op_ret = get_params(y);
+ if (op_ret) {
+ return op_ret;
}
- // forward bucket acl requests to meta master zone
- if (s->object.empty() && !store->is_meta_master()) {
- bufferlist in_data;
- // include acl data unless it was generated from a canned_acl
- if (s->canned_acl.empty()) {
- in_data.append(data, len);
- }
- op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
- if (op_ret < 0) {
- ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
- return;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+
+ if (s->iam_policy || ! s->iam_user_policies.empty() || ! s->session_policies.empty()) {
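+    // bypassing governance-mode retention additionally requires the
+    // s3:BypassGovernanceRetention permission; an explicit Deny clears bypass_perm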
+ if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) {
+ ARN bucket_arn(s->bucket->get_key());
+ auto r = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key()));
+ if (r == Effect::Deny) {
+ bypass_perm = false;
+ } else if (r == Effect::Pass && s->iam_policy) {
+ r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention,
+ bucket_arn);
+ if (r == Effect::Deny) {
+ bypass_perm = false;
+ }
+ } else if (r == Effect::Pass && !s->session_policies.empty()) {
+ r = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key()));
+ if (r == Effect::Deny) {
+ bypass_perm = false;
+ }
+ }
}
- }
- if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
- ldout(s->cct, 15) << "Old AccessControlPolicy";
- policy->to_xml(*_dout);
- *_dout << dendl;
- }
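+    // a delete without an explicit version id is evaluated as s3:DeleteObject,
+    // otherwise as s3:DeleteObjectVersion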
+ bool not_versioned = rgw::sal::Object::empty(s->object.get()) || s->object->get_instance().empty();
- op_ret = policy->rebuild(store, &owner, new_policy);
- if (op_ret < 0)
- return;
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ not_versioned ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ ARN(s->bucket->get_key()));
+ if (identity_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
- if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
- ldout(s->cct, 15) << "New AccessControlPolicy:";
- new_policy.to_xml(*_dout);
- *_dout << dendl;
+ rgw::IAM::Effect r = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ rgw::ARN bucket_arn(s->bucket->get_key());
+ if (s->iam_policy) {
+ r = s->iam_policy->eval(s->env, *s->auth.identity,
+ not_versioned ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ bucket_arn,
+ princ_type);
+ }
+ if (r == Effect::Deny)
+ return -EACCES;
+
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ not_versioned ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ ARN(s->bucket->get_key()));
+ if (session_policy_res == Effect::Deny) {
+ return -EACCES;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && r == Effect::Allow)) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow) {
+ return 0;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
+ return 0;
+ }
+ }
+ return -EACCES;
+ }
+ if (r == Effect::Allow || identity_policy_res == Effect::Allow)
+ return 0;
}
- new_policy.encode(bl);
- map<string, bufferlist> attrs;
+ acl_allowed = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
+ if (!acl_allowed)
+ return -EACCES;
- if (!s->object.empty()) {
- obj = rgw_obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
- //if instance is empty, we should modify the latest object
- op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
- } else {
- attrs = s->bucket_attrs;
- attrs[RGW_ATTR_ACL] = bl;
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
- }
- if (op_ret == -ECANCELED) {
- op_ret = 0; /* lost a race, but it's ok because acls are immutable */
- }
+ return 0;
}
-static void get_lc_oid(struct req_state *s, string& oid)
+void RGWDeleteMultiObj::pre_exec()
{
- string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
- int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs;
- int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs;
- oid = lc_oid_prefix;
- char buf[32];
- snprintf(buf, 32, ".%d", index);
- oid.append(buf);
- return;
+ rgw_bucket_object_pre_exec(s);
}
-void RGWPutLC::execute()
+void RGWDeleteMultiObj::execute(optional_yield y)
{
- bufferlist bl;
-
- RGWLifecycleConfiguration_S3 *config = NULL;
- RGWLCXMLParser_S3 parser(s->cct);
- RGWLifecycleConfiguration_S3 new_config(s->cct);
+ RGWMultiDelDelete *multi_delete;
+ vector<rgw_obj_key>::iterator iter;
+ RGWMultiDelXMLParser parser;
+ RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
+ char* buf;
- if (!parser.init()) {
+ buf = data.c_str();
+ if (!buf) {
op_ret = -EINVAL;
- return;
+ goto error;
}
- op_ret = get_params();
- if (op_ret < 0)
- return;
-
- ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;
-
- if (!parser.parse(data, len, 1)) {
- op_ret = -ERR_MALFORMED_XML;
- return;
+ if (!parser.init()) {
+ op_ret = -EINVAL;
+ goto error;
}
- config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
- if (!config) {
- op_ret = -ERR_MALFORMED_XML;
- return;
+
+ if (!parser.parse(buf, data.length(), 1)) {
+ op_ret = -EINVAL;
+ goto error;
}
- if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
- ldout(s->cct, 15) << "Old LifecycleConfiguration:";
- config->to_xml(*_dout);
- *_dout << dendl;
+ multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
+ if (!multi_delete) {
+ op_ret = -EINVAL;
+ goto error;
+ } else {
+#define DELETE_MULTI_OBJ_MAX_NUM 1000
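+    // matches the S3 limit of 1000 keys per DeleteObjects request; used as the
+    // fallback when rgw_delete_multi_obj_max_num is negative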
+ int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
+ if (max_num < 0) {
+ max_num = DELETE_MULTI_OBJ_MAX_NUM;
+ }
+ int multi_delete_object_num = multi_delete->objects.size();
+ if (multi_delete_object_num > max_num) {
+ op_ret = -ERR_MALFORMED_XML;
+ goto error;
+ }
}
- op_ret = config->rebuild(store, new_config);
- if (op_ret < 0)
- return;
+ if (multi_delete->is_quiet())
+ quiet = true;
- if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
- ldout(s->cct, 15) << "New LifecycleConfiguration:";
- new_config.to_xml(*_dout);
- *_dout << dendl;
- }
-
- new_config.encode(bl);
- map<string, bufferlist> attrs;
- attrs = s->bucket_attrs;
- attrs[RGW_ATTR_LC] = bl;
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
- if (op_ret < 0)
- return;
- string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
- string oid;
- get_lc_oid(s, oid);
- pair<string, int> entry(shard_id, lc_uninitial);
- int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
- rados::cls::lock::Lock l(lc_index_lock_name);
- utime_t time(max_lock_secs, 0);
- l.set_duration(time);
- l.set_cookie(cookie);
- librados::IoCtx *ctx = store->get_lc_pool_ctx();
- do {
- op_ret = l.lock_exclusive(ctx, oid);
- if (op_ret == -EBUSY) {
- dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
- sleep(5);
- continue;
- }
- if (op_ret < 0) {
- dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
- break;
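+  // with MFA enabled on the bucket, deleting specific object versions requires
+  // a verified MFA token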
+ if (s->bucket->get_info().mfa_enabled()) {
+ bool has_versioned = false;
+ for (auto i : multi_delete->objects) {
+ if (!i.instance.empty()) {
+ has_versioned = true;
+ break;
+ }
}
- op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
- if (op_ret < 0) {
- dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
+ if (has_versioned && !s->mfa_verified) {
+ ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl;
+ op_ret = -ERR_MFA_REQUIRED;
+ goto error;
}
- break;
- }while(1);
- l.unlock(ctx, oid);
- return;
-}
+ }
-void RGWDeleteLC::execute()
-{
- bufferlist bl;
- map<string, bufferlist> orig_attrs, attrs;
- map<string, bufferlist>::iterator iter;
- rgw_raw_obj obj;
- store->get_bucket_instance_obj(s->bucket, obj);
- store->set_prefetch_data(s->obj_ctx, obj);
- op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
- if (op_ret < 0)
- return;
-
- for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
- const string& name = iter->first;
- dout(10) << "DeleteLC : attr: " << name << dendl;
- if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) {
- if (attrs.find(name) == attrs.end()) {
- attrs[name] = iter->second;
+ begin_response();
+ if (multi_delete->objects.empty()) {
+ goto done;
+ }
+
+ for (iter = multi_delete->objects.begin();
+ iter != multi_delete->objects.end();
+ ++iter) {
+ std::string version_id;
+ std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(*iter);
+ if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ iter->instance.empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ ARN(obj->get_obj()));
+ if (identity_policy_res == Effect::Deny) {
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
+ }
+
+ rgw::IAM::Effect e = Effect::Pass;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ if (s->iam_policy) {
+ ARN obj_arn(obj->get_obj());
+ e = s->iam_policy->eval(s->env,
+ *s->auth.identity,
+ iter->instance.empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ obj_arn,
+ princ_type);
+ }
+ if (e == Effect::Deny) {
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
+ }
+
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ iter->instance.empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion,
+ ARN(obj->get_obj()));
+ if (session_policy_res == Effect::Deny) {
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
+ }
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
+ (session_policy_res != Effect::Allow || e != Effect::Allow)) {
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
+ }
}
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
+ }
+
+ if ((identity_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
+ send_partial_response(*iter, false, "", -EACCES);
+ continue;
}
}
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
- string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
- pair<string, int> entry(shard_id, lc_uninitial);
- string oid;
- get_lc_oid(s, oid);
- int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
- librados::IoCtx *ctx = store->get_lc_pool_ctx();
- rados::cls::lock::Lock l(lc_index_lock_name);
- utime_t time(max_lock_secs, 0);
- l.set_duration(time);
- do {
- op_ret = l.lock_exclusive(ctx, oid);
- if (op_ret == -EBUSY) {
- dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
- sleep(5);
- continue;
- }
- if (op_ret < 0) {
- dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl;
- break;
+
+ uint64_t obj_size = 0;
+ std::string etag;
+
+ if (!rgw::sal::Object::empty(obj.get())) {
+ RGWObjState* astate = nullptr;
+ bool check_obj_lock = obj->have_instance() && bucket->get_info().obj_lock_enabled();
+ const auto ret = obj->get_obj_state(this, obj_ctx, &astate, s->yield, true);
+
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+          // the object may be a delete marker; skip check_obj_lock
+ check_obj_lock = false;
+ } else {
+ // Something went wrong.
+ send_partial_response(*iter, false, "", ret);
+ continue;
+ }
+ } else {
+ obj_size = astate->size;
+ etag = astate->attrset[RGW_ATTR_ETAG].to_str();
+ }
+
+ if (check_obj_lock) {
+ ceph_assert(astate);
+ int object_lock_response = verify_object_lock(this, astate->attrset, bypass_perm, bypass_governance_mode);
+ if (object_lock_response != 0) {
+ send_partial_response(*iter, false, "", object_lock_response);
+ continue;
+ }
+ }
}
- op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
+
+ // make reservation for notification if needed
+ const auto versioned_object = s->bucket->versioning_enabled();
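+    // in a versioned bucket a delete without a version id creates a delete
+    // marker, so report the matching notification event type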
+ const auto event_type = versioned_object && obj->get_instance().empty() ?
+ rgw::notify::ObjectRemovedDeleteMarkerCreated :
+ rgw::notify::ObjectRemovedDelete;
+ std::unique_ptr<rgw::sal::Notification> res
+ = store->get_notification(obj.get(), s->src_object.get(), s, event_type);
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
- dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl;
+ send_partial_response(*iter, false, "", op_ret);
+ continue;
}
- break;
- }while(1);
- l.unlock(ctx, oid);
- return;
-}
-int RGWGetCORS::verify_permission()
-{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
- return -EACCES;
- }
+ obj->set_atomic(obj_ctx);
- return 0;
-}
+ std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = obj->get_delete_op(obj_ctx);
+ del_op->params.versioning_status = obj->get_bucket()->get_info().versioning_status();
+ del_op->params.obj_owner = s->owner;
+ del_op->params.bucket_owner = s->bucket_owner;
+ del_op->params.marker_version_id = version_id;
-void RGWGetCORS::execute()
-{
- op_ret = read_bucket_cors();
- if (op_ret < 0)
- return ;
+ op_ret = del_op->delete_obj(this, y);
+ if (op_ret == -ENOENT) {
+ op_ret = 0;
+ }
- if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
- op_ret = -ENOENT;
- return;
- }
-}
+ send_partial_response(*iter, obj->get_delete_marker(), del_op->result.version_id, op_ret);
-int RGWPutCORS::verify_permission()
-{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
- return -EACCES;
+ // send request to notification manager
+ int ret = res->publish_commit(this, obj_size, ceph::real_clock::now(), etag, version_id);
+ if (ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
+ // too late to rollback operation, hence op_ret is not set here
+ }
}
- return 0;
-}
+  /* set the return code to zero; errors at this point will be
+     dumped to the response */
+ op_ret = 0;
-void RGWPutCORS::execute()
-{
- rgw_raw_obj obj;
+done:
+ // will likely segfault if begin_response() has not been called
+ end_response();
+ return;
- op_ret = get_params();
- if (op_ret < 0)
- return;
+error:
+ send_status();
+ return;
- map<string, bufferlist> attrs = s->bucket_attrs;
- attrs[RGW_ATTR_CORS] = cors_bl;
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
}
-int RGWDeleteCORS::verify_permission()
+bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
+ map<string, bufferlist>& battrs,
+ ACLOwner& bucket_owner /* out */,
+ optional_yield y)
{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
- return -EACCES;
+ RGWAccessControlPolicy bacl(store->ctx());
+ int ret = read_bucket_policy(dpp, store, s, binfo, battrs, &bacl, binfo.bucket, y);
+ if (ret < 0) {
+ return false;
}
- return 0;
+ auto policy = get_iam_policy_from_attr(s->cct, battrs, binfo.bucket.tenant);
+
+ bucket_owner = bacl.get_owner();
+
+ /* We can use global user_acl because each BulkDelete request is allowed
+ * to work on entities from a single account only. */
+ return verify_bucket_permission(dpp, s, binfo.bucket, s->user_acl.get(),
+ &bacl, policy, s->iam_user_policies, s->session_policies, rgw::IAM::s3DeleteBucket);
}
-void RGWDeleteCORS::execute()
+bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yield y)
{
- op_ret = read_bucket_cors();
- if (op_ret < 0)
- return;
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ ACLOwner bowner;
+ RGWObjVersionTracker ot;
- bufferlist bl;
- rgw_raw_obj obj;
- if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
- op_ret = -ENOENT;
- return;
+ int ret = store->get_bucket(dpp, s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y);
+ if (ret < 0) {
+ goto binfo_fail;
}
- store->get_bucket_instance_obj(s->bucket, obj);
- store->set_prefetch_data(s->obj_ctx, obj);
- map<string, bufferlist> orig_attrs, attrs, rmattrs;
- map<string, bufferlist>::iterator iter;
- op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
- if (op_ret < 0)
- return;
+ ret = bucket->load_bucket(dpp, s->yield);
+ if (ret < 0) {
+ goto binfo_fail;
+ }
- /* only remove meta attrs */
- for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
- const string& name = iter->first;
- dout(10) << "DeleteCORS : attr: " << name << dendl;
- if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) {
- rmattrs[name] = iter->second;
- } else if (attrs.find(name) == attrs.end()) {
- attrs[name] = iter->second;
- }
+ if (!verify_permission(bucket->get_info(), bucket->get_attrs(), bowner, y)) {
+ ret = -EACCES;
+ goto auth_fail;
}
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
-}
-void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
- get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
-}
+ if (!path.obj_key.empty()) {
+ ACLOwner bucket_owner;
-int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
- rule = cc->host_name_rule(origin);
- if (!rule) {
- dout(10) << "There is no cors rule present for " << origin << dendl;
- return -ENOENT;
- }
+ bucket_owner.set_id(bucket->get_info().owner);
+ std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(path.obj_key);
+ obj->set_atomic(s->obj_ctx);
- if (!validate_cors_rule_method(rule, req_meth)) {
- return -ENOENT;
+ std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = obj->get_delete_op(s->obj_ctx);
+ del_op->params.versioning_status = obj->get_bucket()->get_info().versioning_status();
+ del_op->params.obj_owner = bowner;
+ del_op->params.bucket_owner = bucket_owner;
+
+ ret = del_op->delete_obj(dpp, y);
+ if (ret < 0) {
+ goto delop_fail;
+ }
+ } else {
+ ret = bucket->remove_bucket(dpp, false, true, &s->info, s->yield);
+ if (ret < 0) {
+ goto delop_fail;
+ }
}
- return 0;
-}
-void RGWOptionsCORS::execute()
-{
- op_ret = read_bucket_cors();
- if (op_ret < 0)
- return;
+ num_deleted++;
+ return true;
- origin = s->info.env->get("HTTP_ORIGIN");
- if (!origin) {
- dout(0) <<
- "Preflight request without mandatory Origin header"
- << dendl;
- op_ret = -EINVAL;
- return;
- }
- req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
- if (!req_meth) {
- dout(0) <<
- "Preflight request without mandatory Access-control-request-method header"
- << dendl;
- op_ret = -EINVAL;
- return;
- }
- if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
- op_ret = -ENOENT;
- return;
- }
- req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
- op_ret = validate_cors_request(&bucket_cors);
- if (!rule) {
- origin = req_meth = NULL;
- return;
+binfo_fail:
+ if (-ENOENT == ret) {
+ ldpp_dout(dpp, 20) << "cannot find bucket = " << path.bucket_name << dendl;
+ num_unfound++;
+ } else {
+ ldpp_dout(dpp, 20) << "cannot get bucket info, ret = " << ret << dendl;
+
+ fail_desc_t failed_item = {
+ .err = ret,
+ .path = path
+ };
+ failures.push_back(failed_item);
+ }
+ return false;
+
+auth_fail:
+ ldpp_dout(dpp, 20) << "wrong auth for " << path << dendl;
+ {
+ fail_desc_t failed_item = {
+ .err = ret,
+ .path = path
+ };
+ failures.push_back(failed_item);
+ }
+ return false;
+
+delop_fail:
+ if (-ENOENT == ret) {
+ ldpp_dout(dpp, 20) << "cannot find entry " << path << dendl;
+ num_unfound++;
+ } else {
+ fail_desc_t failed_item = {
+ .err = ret,
+ .path = path
+ };
+ failures.push_back(failed_item);
+ }
+ return false;
+}
+
+bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths, optional_yield y)
+{
+ ldpp_dout(dpp, 20) << "in delete_chunk" << dendl;
+ for (auto path : paths) {
+ ldpp_dout(dpp, 20) << "bulk deleting path: " << path << dendl;
+ delete_single(path, y);
}
- return;
+
+ return true;
}
-int RGWGetRequestPayment::verify_permission()
+int RGWBulkDelete::verify_permission(optional_yield y)
{
return 0;
}
-void RGWGetRequestPayment::pre_exec()
+void RGWBulkDelete::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWGetRequestPayment::execute()
+void RGWBulkDelete::execute(optional_yield y)
{
- requester_pays = s->bucket_info.requester_pays;
-}
+  deleter = std::make_unique<Deleter>(this, store, s);
-int RGWSetRequestPayment::verify_permission()
-{
- if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
- return -EACCES;
- }
+ bool is_truncated = false;
+ do {
+ list<RGWBulkDelete::acct_path_t> items;
- return 0;
-}
+ int ret = get_data(items, &is_truncated);
+ if (ret < 0) {
+ return;
+ }
-void RGWSetRequestPayment::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
+ ret = deleter->delete_chunk(items, y);
+ } while (!op_ret && is_truncated);
+
+ return;
}
-void RGWSetRequestPayment::execute()
-{
- op_ret = get_params();
- if (op_ret < 0)
- return;
+constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
- s->bucket_info.requester_pays = requester_pays;
- op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
- &s->bucket_attrs);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
- << " returned err=" << op_ret << dendl;
- return;
+int RGWBulkUploadOp::verify_permission(optional_yield y)
+{
+ if (s->auth.identity->is_anonymous()) {
+ return -EACCES;
}
-}
-int RGWInitMultipart::verify_permission()
-{
- if (!verify_bucket_permission(s, RGW_PERM_WRITE))
+ if (! verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ return -EACCES;
+ }
+
+ if (s->user->get_tenant() != s->bucket_tenant) {
+ ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
+ << " (user_id.tenant=" << s->user->get_tenant()
+ << " requested=" << s->bucket_tenant << ")" << dendl;
return -EACCES;
+ }
+
+ if (s->user->get_max_buckets() < 0) {
+ return -EPERM;
+ }
return 0;
}
-void RGWInitMultipart::pre_exec()
+void RGWBulkUploadOp::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWInitMultipart::execute()
+boost::optional<std::pair<std::string, rgw_obj_key>>
+RGWBulkUploadOp::parse_path(const std::string_view& path)
{
- bufferlist aclbl;
- map<string, bufferlist> attrs;
- rgw_obj obj;
-
- if (get_params() < 0)
- return;
-
- if (s->object.empty())
- return;
-
- policy.encode(aclbl);
- attrs[RGW_ATTR_ACL] = aclbl;
-
- populate_with_generic_attrs(s, attrs);
-
- /* select encryption mode */
- op_ret = prepare_encryption(attrs);
- if (op_ret != 0)
- return;
-
- rgw_get_request_metadata(s->cct, s->info, attrs);
-
- do {
- char buf[33];
- gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
- upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
- upload_id.append(buf);
-
- string tmp_obj_name;
- RGWMPObj mp(s->object.name, upload_id);
- tmp_obj_name = mp.get_meta();
-
- obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
- // the meta object will be indexed with 0 size, we c
- obj.set_in_extra_data(true);
- obj.index_hash_source = s->object.name;
+ /* We need to skip all slashes at the beginning in order to preserve
+ * compliance with Swift. */
+ const size_t start_pos = path.find_first_not_of('/');
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
- op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
+ if (std::string_view::npos != start_pos) {
+    /* Separator is the first slash after the leading ones. */
+ const size_t sep_pos = path.substr(start_pos).find('/');
- RGWRados::Object::Write obj_op(&op_target);
+ if (std::string_view::npos != sep_pos) {
+ const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
+ const auto obj_name = path.substr(sep_pos + 1);
- obj_op.meta.owner = s->owner.get_id();
- obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
- obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
+ return std::make_pair(std::string(bucket_name),
+ rgw_obj_key(std::string(obj_name)));
+ } else {
+      /* It's guaranteed here that the bucket name is at least one character
+       * long and contains no slash. */
+ return std::make_pair(std::string(path.substr(start_pos)),
+ rgw_obj_key());
+ }
+ }
- op_ret = obj_op.write_meta(0, 0, attrs);
- } while (op_ret == -EEXIST);
+ return none;
}
-static int get_multipart_info(RGWRados *store, struct req_state *s,
- string& meta_oid,
- RGWAccessControlPolicy *policy,
- map<string, bufferlist>& attrs)
+std::pair<std::string, std::string>
+RGWBulkUploadOp::handle_upload_path(struct req_state *s)
{
- map<string, bufferlist>::iterator iter;
- bufferlist header;
-
- rgw_obj obj;
- obj.init_ns(s->bucket, meta_oid, mp_ns);
- obj.set_in_extra_data(true);
+ std::string bucket_path, file_prefix;
+ if (! s->init_state.url_bucket.empty()) {
+ file_prefix = bucket_path = s->init_state.url_bucket + "/";
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ const std::string& object_name = s->object->get_name();
- int op_ret = get_obj_attrs(store, s, obj, attrs);
- if (op_ret < 0) {
- if (op_ret == -ENOENT) {
- return -ERR_NO_SUCH_UPLOAD;
+      /* rgw::sal::Object::empty() has already verified that s->object->get_name()
+       * is non-empty, so we can safely examine its last element. */
+ if (object_name.back() == '/') {
+ file_prefix.append(object_name);
+ } else {
+ file_prefix.append(object_name).append("/");
+ }
}
- return op_ret;
}
+ return std::make_pair(bucket_path, file_prefix);
+}
- if (policy) {
- for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
- string name = iter->first;
- if (name.compare(RGW_ATTR_ACL) == 0) {
- bufferlist& bl = iter->second;
- bufferlist::iterator bli = bl.begin();
- try {
- ::decode(*policy, bli);
- } catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
- return -EIO;
- }
- break;
- }
+int RGWBulkUploadOp::handle_dir_verify_permission(optional_yield y)
+{
+ if (s->user->get_max_buckets() > 0) {
+ rgw::sal::BucketList buckets;
+ std::string marker;
+ op_ret = s->user->list_buckets(this, marker, std::string(), s->user->get_max_buckets(),
+ false, buckets, y);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ if (buckets.count() >= static_cast<size_t>(s->user->get_max_buckets())) {
+ return -ERR_TOO_MANY_BUCKETS;
}
}
return 0;
}
-int RGWCompleteMultipart::verify_permission()
+static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, req_info& info, const std::string& bucket_name)
{
- if (!verify_bucket_permission(s, RGW_PERM_WRITE))
- return -EACCES;
+  /* a container- or object-level request already contains the bucket name;
+   * only an account-level request needs the bucket name appended */
+ if (info.script_uri.find(bucket_name) != std::string::npos) {
+ return;
+ }
- return 0;
+  ldpp_dout(dpp, 20) << "append the bucket: " << bucket_name << " to req_info" << dendl;
+ info.script_uri.append("/").append(bucket_name);
+ info.request_uri_aws4 = info.request_uri = info.script_uri;
+ info.effective_uri = "/" + bucket_name;
}
-void RGWCompleteMultipart::pre_exec()
+void RGWBulkUploadOp::init(rgw::sal::Store* const store,
+ struct req_state* const s,
+ RGWHandler* const h)
{
- rgw_bucket_object_pre_exec(s);
+ RGWOp::init(store, s, h);
}
-void RGWCompleteMultipart::execute()
+int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y)
{
- RGWMultiCompleteUpload *parts;
- map<int, string>::iterator iter;
- RGWMultiXMLParser parser;
- string meta_oid;
- map<uint32_t, RGWUploadPartInfo> obj_parts;
- map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
- map<string, bufferlist> attrs;
- off_t ofs = 0;
- MD5 hash;
- char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
- char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
- bufferlist etag_bl;
- rgw_obj meta_obj;
- rgw_obj target_obj;
- RGWMPObj mp;
- RGWObjManifest manifest;
- uint64_t olh_epoch = 0;
- string version_id;
+ ldpp_dout(this, 20) << "got directory=" << path << dendl;
- op_ret = get_params();
- if (op_ret < 0)
- return;
- op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
+ op_ret = handle_dir_verify_permission(y);
if (op_ret < 0) {
- return;
- }
-
- if (!data || !len) {
- op_ret = -ERR_MALFORMED_XML;
- return;
- }
-
- if (!parser.init()) {
- op_ret = -EIO;
- return;
- }
-
- if (!parser.parse(data, len, 1)) {
- op_ret = -ERR_MALFORMED_XML;
- return;
- }
-
- parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
- if (!parts || parts->parts.empty()) {
- op_ret = -ERR_MALFORMED_XML;
- return;
- }
-
- if ((int)parts->parts.size() >
- s->cct->_conf->rgw_multipart_part_upload_limit) {
- op_ret = -ERANGE;
- return;
+ return op_ret;
}
- mp.init(s->object.name, upload_id);
- meta_oid = mp.get_meta();
-
- int total_parts = 0;
- int handled_parts = 0;
- int max_parts = 1000;
- int marker = 0;
- bool truncated;
- RGWCompressionInfo cs_info;
- bool compressed = false;
- uint64_t accounted_size = 0;
+ std::string bucket_name;
+ rgw_obj_key object_junk;
+ std::tie(bucket_name, object_junk) = *parse_path(path);
- uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;
+ rgw_raw_obj obj(store->get_zone()->get_params().domain_root,
+ rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
- list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */
+  /* make sure we read the bucket info; it has not been read yet for this
+   * specific request */
+ std::unique_ptr<rgw::sal::Bucket> bucket;
- bool versioned_object = s->bucket_info.versioning_enabled();
+ /* Create metadata: ACLs. */
+ std::map<std::string, ceph::bufferlist> attrs;
+ RGWAccessControlPolicy policy;
+ policy.create_default(s->user->get_id(), s->user->get_display_name());
+ ceph::bufferlist aclbl;
+ policy.encode(aclbl);
+ attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
- iter = parts->parts.begin();
+ obj_version objv, ep_objv;
+ bool bucket_exists;
+ RGWQuotaInfo quota_info;
+ const RGWQuotaInfo* pquota_info = nullptr;
+ RGWBucketInfo out_info;
+ string swift_ver_location;
+ rgw_bucket new_bucket;
+ req_info info = s->info;
+ new_bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
+ new_bucket.name = bucket_name;
+ rgw_placement_rule placement_rule;
+ placement_rule.storage_class = s->info.storage_class;
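+  // adjust the forwarded req_info so it carries the bucket name, e.g. when the
+  // bucket creation is replayed on the metadata master zone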
+ forward_req_info(this, s->cct, info, bucket_name);
+
+ op_ret = s->user->create_bucket(this, new_bucket,
+ store->get_zone()->get_zonegroup().get_id(),
+ placement_rule, swift_ver_location,
+ pquota_info, policy, attrs,
+ out_info, ep_objv,
+ true, false, &bucket_exists,
+ info, &bucket, y);
+ /* continue if EEXIST and create_bucket will fail below. this way we can
+ * recover from a partial create by retrying it. */
+ ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret
+ << ", bucket=" << bucket << dendl;
- meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
- meta_obj.set_in_extra_data(true);
- meta_obj.index_hash_source = s->object.name;
+ return op_ret;
+}
- op_ret = get_obj_attrs(store, s, meta_obj, attrs);
+bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
+ const rgw_obj& obj,
+ std::map<std::string, ceph::bufferlist>& battrs,
+ ACLOwner& bucket_owner /* out */,
+ optional_yield y)
+{
+ RGWAccessControlPolicy bacl(store->ctx());
+ op_ret = read_bucket_policy(this, store, s, binfo, battrs, &bacl, binfo.bucket, y);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
- << " ret=" << op_ret << dendl;
- return;
+ ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
+ return false;
}
- do {
- op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
- marker, obj_parts, &marker, &truncated);
- if (op_ret == -ENOENT) {
- op_ret = -ERR_NO_SUCH_UPLOAD;
- }
- if (op_ret < 0)
- return;
+ auto policy = get_iam_policy_from_attr(s->cct, battrs, binfo.bucket.tenant);
- total_parts += obj_parts.size();
- if (!truncated && total_parts != (int)parts->parts.size()) {
- ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
- << " expected: " << parts->parts.size() << dendl;
- op_ret = -ERR_INVALID_PART;
- return;
+ bucket_owner = bacl.get_owner();
+ if (policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
+ auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
+ rgw::IAM::s3PutObject, obj);
+ if (identity_policy_res == Effect::Deny) {
+ return false;
}
- for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
- uint64_t part_size = obj_iter->second.accounted_size;
- if (handled_parts < (int)parts->parts.size() - 1 &&
- part_size < min_part_size) {
- op_ret = -ERR_TOO_SMALL;
- return;
- }
-
- char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
- if (iter->first != (int)obj_iter->first) {
- ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
- << iter->first << " next uploaded: "
- << obj_iter->first << dendl;
- op_ret = -ERR_INVALID_PART;
- return;
+ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
+ ARN obj_arn(obj);
+ auto e = policy->eval(s->env, *s->auth.identity,
+ rgw::IAM::s3PutObject, obj_arn, princ_type);
+ if (e == Effect::Deny) {
+ return false;
+ }
+
+ if (!s->session_policies.empty()) {
+ auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
+ rgw::IAM::s3PutObject, obj);
+ if (session_policy_res == Effect::Deny) {
+ return false;
}
- string part_etag = rgw_string_unquote(iter->second);
- if (part_etag.compare(obj_iter->second.etag) != 0) {
- ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
- << " etag: " << iter->second << dendl;
- op_ret = -ERR_INVALID_PART;
- return;
+ if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
+ (session_policy_res == Effect::Allow && e == Effect::Allow)) {
+ return true;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
+ return true;
+ }
+ } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
+ if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
+ return true;
+ }
}
+ return false;
+ }
+ if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
+ return true;
+ }
+ }
+
+ return verify_bucket_permission_no_policy(this, s, s->user_acl.get(),
+ &bacl, RGW_PERM_WRITE);
+}
- hex_to_buf(obj_iter->second.etag.c_str(), petag,
- CEPH_CRYPTO_MD5_DIGESTSIZE);
- hash.Update((const byte *)petag, sizeof(petag));
-
- RGWUploadPartInfo& obj_part = obj_iter->second;
-
- /* update manifest for part */
- string oid = mp.get_part(obj_iter->second.num);
- rgw_obj src_obj;
- src_obj.init_ns(s->bucket, oid, mp_ns);
+int RGWBulkUploadOp::handle_file(const std::string_view path,
+ const size_t size,
+ AlignedStreamGetter& body, optional_yield y)
+{
- if (obj_part.manifest.empty()) {
- ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
- << src_obj << dendl;
- op_ret = -ERR_INVALID_PART;
- return;
- } else {
- manifest.append(obj_part.manifest, store);
- }
+ ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl;
- if (obj_part.cs_info.compression_type != "none") {
- if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) {
- ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
- << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
- op_ret = -ERR_INVALID_PART;
- return;
- }
- int new_ofs; // offset in compression data for new part
- if (cs_info.blocks.size() > 0)
- new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
- else
- new_ofs = 0;
- for (const auto& block : obj_part.cs_info.blocks) {
- compression_block cb;
- cb.old_ofs = block.old_ofs + cs_info.orig_size;
- cb.new_ofs = new_ofs;
- cb.len = block.len;
- cs_info.blocks.push_back(cb);
- new_ofs = cb.new_ofs + cb.len;
- }
- if (!compressed)
- cs_info.compression_type = obj_part.cs_info.compression_type;
- cs_info.orig_size += obj_part.cs_info.orig_size;
- compressed = true;
- }
+ if (size > static_cast<size_t>(s->cct->_conf->rgw_max_put_size)) {
+ op_ret = -ERR_TOO_LARGE;
+ return op_ret;
+ }
- rgw_obj_index_key remove_key;
- src_obj.key.get_index_key(&remove_key);
+ std::string bucket_name;
+ rgw_obj_key object;
+ std::tie(bucket_name, object) = *parse_path(path);
- remove_objs.push_back(remove_key);
+ auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ ACLOwner bowner;
- ofs += obj_part.size;
- accounted_size += obj_part.accounted_size;
+ op_ret = store->get_bucket(this, s->user.get(), rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y);
+ if (op_ret < 0) {
+ if (op_ret == -ENOENT) {
+ ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
}
- } while (truncated);
- hash.Final((byte *)final_etag);
-
- buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
- snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
- "-%lld", (long long)parts->parts.size());
- etag = final_etag_str;
- ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;
+ return op_ret;
+ }
- etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);
+ std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(object);
- attrs[RGW_ATTR_ETAG] = etag_bl;
+ if (! handle_file_verify_permission(bucket->get_info(),
+ obj->get_obj(),
+ bucket->get_attrs(), bowner, y)) {
+ ldpp_dout(this, 20) << "object creation unauthorized" << dendl;
+ op_ret = -EACCES;
+ return op_ret;
+ }
- if (compressed) {
- // write compression attribute to full object
- bufferlist tmp;
- ::encode(cs_info, tmp);
- attrs[RGW_ATTR_COMPRESSION] = tmp;
+ op_ret = bucket->check_quota(this, user_quota, bucket_quota, size, y);
+ if (op_ret < 0) {
+ return op_ret;
}
- target_obj.init(s->bucket, s->object.name);
- if (versioned_object) {
- store->gen_rand_obj_instance_name(&target_obj);
+ if (bucket->versioning_enabled()) {
+ obj->gen_rand_obj_instance_name();
}
- RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
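+  // fall back to the bucket's placement rule for any placement fields the
+  // request left unset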
+ rgw_placement_rule dest_placement = s->dest_placement;
+ dest_placement.inherit_from(bucket->get_placement_rule());
- obj_ctx.obj.set_atomic(target_obj);
+ std::unique_ptr<rgw::sal::Writer> processor;
+ processor = store->get_atomic_writer(this, s->yield, std::move(obj),
+ bowner.get_id(), obj_ctx,
+ &s->dest_placement, 0, s->req_id);
+ op_ret = processor->prepare(s->yield);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl;
+ return op_ret;
+ }
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
- RGWRados::Object::Write obj_op(&op_target);
+ /* No filters by default. */
+ rgw::sal::DataProcessor *filter = processor.get();
- obj_op.meta.manifest = &manifest;
- obj_op.meta.remove_objs = &remove_objs;
+ const auto& compression_type = store->get_zone()->get_params().get_compression_type(
+ dest_placement);
+ CompressorRef plugin;
+ boost::optional<RGWPutObj_Compress> compressor;
+ if (compression_type != "none") {
+ plugin = Compressor::create(s->cct, compression_type);
+ if (! plugin) {
+ ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type "
+ << compression_type << dendl;
+ } else {
+ compressor.emplace(s->cct, plugin, filter);
+ filter = &*compressor;
+ }
+ }
- obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
- obj_op.meta.owner = s->owner.get_id();
- obj_op.meta.flags = PUT_OBJ_CREATE;
- op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
- if (op_ret < 0)
- return;
+ /* Upload file content. */
+ ssize_t len = 0;
+ size_t ofs = 0;
+ MD5 hash;
+ // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+ hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
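+  // stream the body in rgw_max_chunk_size pieces through the (optional)
+  // compression filter while accumulating the MD5 of the plain data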
+ do {
+ ceph::bufferlist data;
+ len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);
- // remove the upload obj
- int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
- s->bucket_info, meta_obj, 0);
- if (r < 0) {
- ldout(store->ctx(), 0) << "WARNING: failed to remove object " << meta_obj << dendl;
- }
-}
+ ldpp_dout(this, 20) << "body=" << data.c_str() << dendl;
+ if (len < 0) {
+ op_ret = len;
+ return op_ret;
+ } else if (len > 0) {
+ hash.Update((const unsigned char *)data.c_str(), data.length());
+ op_ret = filter->process(std::move(data), ofs);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "filter->process() returned ret=" << op_ret << dendl;
+ return op_ret;
+ }
-int RGWAbortMultipart::verify_permission()
-{
- if (!verify_bucket_permission(s, RGW_PERM_WRITE))
- return -EACCES;
+ ofs += len;
+ }
- return 0;
-}
+ } while (len > 0);
-void RGWAbortMultipart::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
-}
+ // flush
+ op_ret = filter->process({}, ofs);
+ if (op_ret < 0) {
+ return op_ret;
+ }
-void RGWAbortMultipart::execute()
-{
- op_ret = -EINVAL;
- string upload_id;
- string meta_oid;
- upload_id = s->info.args.get("uploadId");
- map<string, bufferlist> attrs;
- rgw_obj meta_obj;
- RGWMPObj mp;
+ if (ofs != size) {
+ ldpp_dout(this, 10) << "real file size different from declared" << dendl;
+ op_ret = -EINVAL;
+ return op_ret;
+ }
- if (upload_id.empty() || s->object.empty())
- return;
+ op_ret = bucket->check_quota(this, user_quota, bucket_quota, size, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
+ return op_ret;
+ }
- mp.init(s->object.name, upload_id);
- meta_oid = mp.get_meta();
+ char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
+ unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
+ hash.Final(m);
+ buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
- op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs);
- if (op_ret < 0)
- return;
+ /* Create metadata: ETAG. */
+ std::map<std::string, ceph::bufferlist> attrs;
+ std::string etag = calc_md5;
+ ceph::bufferlist etag_bl;
+ etag_bl.append(etag.c_str(), etag.size() + 1);
+ attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));
- RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
- op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
-}
+ /* Create metadata: ACLs. */
+ RGWAccessControlPolicy policy;
+ policy.create_default(s->user->get_id(), s->user->get_display_name());
+ ceph::bufferlist aclbl;
+ policy.encode(aclbl);
+ attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
-int RGWListMultipart::verify_permission()
-{
- if (!verify_object_permission(s, RGW_PERM_READ))
- return -EACCES;
+ /* Create metadata: compression info. */
+ if (compressor && compressor->is_compressed()) {
+ ceph::bufferlist tmp;
+ RGWCompressionInfo cs_info;
+ cs_info.compression_type = plugin->get_type_name();
+ cs_info.orig_size = size;
+ cs_info.compressor_message = compressor->get_compressor_message();
+ cs_info.blocks = std::move(compressor->get_compression_blocks());
+ encode(cs_info, tmp);
+ attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
+ }
- return 0;
-}
+ /* Complete the transaction. */
+ op_ret = processor->complete(size, etag, nullptr, ceph::real_time(),
+ attrs, ceph::real_time() /* delete_at */,
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ s->yield);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl;
+ }
-void RGWListMultipart::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
+ return op_ret;
}
-void RGWListMultipart::execute()
+void RGWBulkUploadOp::execute(optional_yield y)
{
- map<string, bufferlist> xattrs;
- string meta_oid;
- RGWMPObj mp;
-
- op_ret = get_params();
- if (op_ret < 0)
- return;
+ ceph::bufferlist buffer(64 * 1024);
- mp.init(s->object.name, upload_id);
- meta_oid = mp.get_meta();
+ ldpp_dout(this, 20) << "start" << dendl;
- op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
- if (op_ret < 0)
+ /* Create an instance of stream-abstracting class. Having this indirection
+ * allows for easy introduction of decompressors like gzip and bzip2. */
+ auto stream = create_stream();
+ if (! stream) {
return;
+ }
- op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
- marker, parts, NULL, &truncated);
-}
-
-int RGWListBucketMultiparts::verify_permission()
-{
- if (!verify_bucket_permission(s, RGW_PERM_READ))
- return -EACCES;
+ /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
+ * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
+ std::string bucket_path, file_prefix;
+ std::tie(bucket_path, file_prefix) = handle_upload_path(s);
- return 0;
-}
+ auto status = rgw::tar::StatusIndicator::create();
+ do {
+ op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
+ if (op_ret < 0) {
+ ldpp_dout(this, 2) << "cannot read header" << dendl;
+ return;
+ }
-void RGWListBucketMultiparts::pre_exec()
-{
- rgw_bucket_object_pre_exec(s);
-}
+ /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
+   * must be tracked to detect the end-of-archive. It occurs when both of
+   * them are empty (zeroed). Tracking this particular inter-block dependency
+   * is the responsibility of the rgw::tar::StatusIndicator class. */
+ boost::optional<rgw::tar::HeaderView> header;
+ std::tie(status, header) = rgw::tar::interpret_block(status, buffer);
-void RGWListBucketMultiparts::execute()
-{
- vector<rgw_bucket_dir_entry> objs;
- string marker_meta;
+ if (! status.empty() && header) {
+ /* This specific block isn't empty (entirely zeroed), so we can parse
+ * it as a TAR header and dispatch. At the moment we do support only
+ * regular files and directories. Everything else (symlinks, devices)
+       * will be ignored but won't abort the whole upload. */
+ switch (header->get_filetype()) {
+ case rgw::tar::FileType::NORMAL_FILE: {
+ ldpp_dout(this, 2) << "handling regular file" << dendl;
+
+          // std::string, not string_view: the concatenation below creates a temporary
+          std::string filename;
+ if (bucket_path.empty())
+ filename = header->get_filename();
+ else
+ filename = file_prefix + std::string(header->get_filename());
+ auto body = AlignedStreamGetter(0, header->get_filesize(),
+ rgw::tar::BLOCK_SIZE, *stream);
+ op_ret = handle_file(filename,
+ header->get_filesize(),
+ body, y);
+ if (! op_ret) {
+            /* Only regular files count. */
+ num_created++;
+ } else {
+ failures.emplace_back(op_ret, std::string(filename));
+ }
+ break;
+ }
+ case rgw::tar::FileType::DIRECTORY: {
+ ldpp_dout(this, 2) << "handling regular directory" << dendl;
- op_ret = get_params();
- if (op_ret < 0)
- return;
+ std::string_view dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
+ op_ret = handle_dir(dirname, y);
+ if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
+ failures.emplace_back(op_ret, std::string(dirname));
+ }
+ break;
+ }
+ default: {
+ /* Not recognized. Skip. */
+ op_ret = 0;
+ break;
+ }
+ }
- if (s->prot_flags & RGW_REST_SWIFT) {
- string path_args;
- path_args = s->info.args.get("path");
- if (!path_args.empty()) {
- if (!delimiter.empty() || !prefix.empty()) {
- op_ret = -EINVAL;
- return;
+      /* In case of any problems with sub-request authorization, Swift simply
+       * terminates the whole upload immediately. */
+ if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
+ terminal_errors)) {
+ ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl;
+ break;
}
- prefix = path_args;
- delimiter="/";
+ } else {
+ ldpp_dout(this, 2) << "an empty block" << dendl;
+ op_ret = 0;
}
- }
- marker_meta = marker.get_meta();
- RGWRados::Bucket target(store, s->bucket_info);
- RGWRados::Bucket::List list_op(&target);
-
- list_op.params.prefix = prefix;
- list_op.params.delim = delimiter;
- list_op.params.marker = marker_meta;
- list_op.params.ns = mp_ns;
- list_op.params.filter = &mp_filter;
-
- op_ret = list_op.list_objects(max_uploads, &objs, &common_prefixes,
- &is_truncated);
- if (!objs.empty()) {
- vector<rgw_bucket_dir_entry>::iterator iter;
- RGWMultipartUploadEntry entry;
- for (iter = objs.begin(); iter != objs.end(); ++iter) {
- rgw_obj_key key(iter->key);
- if (!entry.mp.from_meta(key.name))
- continue;
- entry.obj = *iter;
- uploads.push_back(entry);
- }
- next_marker = entry;
- }
-}
+ buffer.clear();
+ } while (! status.eof());
-void RGWGetHealthCheck::execute()
-{
- if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
- (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
- /* Disabling path specified & existent in the filesystem. */
- op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
- } else {
- op_ret = 0; /* 200 OK */
- }
+ return;
}
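+/* Explanatory note for the loop above (not part of the functional change):
+ * the archive is consumed in rgw::tar::BLOCK_SIZE units, i.e. the standard
+ * 512-byte TAR block. Each header block is followed by the file payload
+ * padded up to the next block boundary, which is why handle_file() is given
+ * an AlignedStreamGetter that later drains the padding in its destructor. */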
-int RGWDeleteMultiObj::verify_permission()
+RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
{
- if (!verify_bucket_permission(s, RGW_PERM_WRITE))
- return -EACCES;
+  const size_t aligned_length = length + (-length % alignment);
+ ceph::bufferlist junk;
- return 0;
+  DecoratedStreamGetter::get_exactly(aligned_length - position, junk);
}
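+/* Explanatory note for ~AlignedStreamGetter() above: "length + (-length %
+ * alignment)" rounds length up to the next multiple of alignment using
+ * unsigned modular arithmetic. With illustrative numbers length=600 and
+ * alignment=512, aligned_length becomes 1024, so after a fully-read 600-byte
+ * payload the destructor skips the 424 padding bytes preceding the next TAR
+ * header. */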
-void RGWDeleteMultiObj::pre_exec()
+ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
+ ceph::bufferlist& dst)
{
- rgw_bucket_object_pre_exec(s);
+ const size_t max_to_read = std::min(want, length - position);
+ const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
+ if (len > 0) {
+ position += len;
+ }
+ return len;
}
-void RGWDeleteMultiObj::execute()
+ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
+ ceph::bufferlist& dst)
{
- RGWMultiDelDelete *multi_delete;
- vector<rgw_obj_key>::iterator iter;
- RGWMultiDelXMLParser parser;
- int num_processed = 0;
- RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
-
- op_ret = get_params();
- if (op_ret < 0) {
- goto error;
- }
-
- if (!data) {
- op_ret = -EINVAL;
- goto error;
- }
-
- if (!parser.init()) {
- op_ret = -EINVAL;
- goto error;
- }
-
- if (!parser.parse(data, len, 1)) {
- op_ret = -EINVAL;
- goto error;
+ const auto len = DecoratedStreamGetter::get_exactly(want, dst);
+ if (len > 0) {
+ position += len;
}
+ return len;
+}
- multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
- if (!multi_delete) {
- op_ret = -EINVAL;
- goto error;
- }
+int RGWGetAttrs::verify_permission(optional_yield y)
+{
+ s->object->set_atomic(s->obj_ctx);
- if (multi_delete->is_quiet())
- quiet = true;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- begin_response();
- if (multi_delete->objects.empty()) {
- goto done;
- }
+ auto iam_action = s->object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion;
- for (iter = multi_delete->objects.begin();
- iter != multi_delete->objects.end() && num_processed < max_to_delete;
- ++iter, num_processed++) {
- rgw_obj obj(bucket, *iter);
+ if (!verify_object_permission(this, s, iam_action)) {
+ return -EACCES;
+ }
- obj_ctx->obj.set_atomic(obj);
+ return 0;
+}
- RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
- RGWRados::Object::Delete del_op(&del_target);
+void RGWGetAttrs::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- del_op.params.bucket_owner = s->bucket_owner.get_id();
- del_op.params.versioning_status = s->bucket_info.versioning_status();
- del_op.params.obj_owner = s->owner;
+void RGWGetAttrs::execute(optional_yield y)
+{
+ op_ret = get_params();
+ if (op_ret < 0)
+ return;
- op_ret = del_op.delete_obj();
- if (op_ret == -ENOENT) {
- op_ret = 0;
- }
+ s->object->set_atomic(s->obj_ctx);
- send_partial_response(*iter, del_op.result.delete_marker,
- del_op.result.version_id, op_ret);
+ op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object
+ << " ret=" << op_ret << dendl;
+ return;
}
- /* set the return code to zero, errors at this point will be
- dumped to the response */
- op_ret = 0;
-
-done:
- // will likely segfault if begin_response() has not been called
- end_response();
- free(data);
- return;
+ /* XXX RGWObject::get_obj_attrs() does not support filtering (yet) */
+ auto& obj_attrs = s->object->get_attrs();
+ if (attrs.size() != 0) {
+ /* return only attrs requested */
+ for (auto& att : attrs) {
+ auto iter = obj_attrs.find(att.first);
+ if (iter != obj_attrs.end()) {
+ att.second = iter->second;
+ }
+ }
+ } else {
+ /* return all attrs */
+ for (auto& att : obj_attrs) {
+      attrs.insert(get_attrs_t::value_type(att.first, att.second));
+ }
+ }
-error:
- send_status();
- free(data);
return;
+ }
-}
-
-bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
- map<string, bufferlist>& battrs,
- ACLOwner& bucket_owner /* out */)
+int RGWRMAttrs::verify_permission(optional_yield y)
{
- RGWAccessControlPolicy bacl(store->ctx());
- int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
- if (ret < 0) {
- return false;
+ // This looks to be part of the RGW-NFS machinery and has no S3 or
+ // Swift equivalent.
+ bool perm;
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ perm = verify_object_permission_no_policy(this, s, RGW_PERM_WRITE);
+ } else {
+ perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
}
+ if (!perm)
+ return -EACCES;
- bucket_owner = bacl.get_owner();
+ return 0;
+}
- /* We can use global user_acl because each BulkDelete request is allowed
- * to work on entities from a single account only. */
- return verify_bucket_permission(s, s->user_acl.get(), &bacl, RGW_PERM_WRITE);
+void RGWRMAttrs::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
}
-bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
+void RGWRMAttrs::execute(optional_yield y)
{
- auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
+ op_ret = get_params();
+ if (op_ret < 0)
+ return;
- RGWBucketInfo binfo;
- map<string, bufferlist> battrs;
- ACLOwner bowner;
+ s->object->set_atomic(s->obj_ctx);
- int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
- path.bucket_name, binfo, nullptr,
- &battrs);
- if (ret < 0) {
- goto binfo_fail;
+ op_ret = s->object->set_obj_attrs(this, s->obj_ctx, nullptr, &attrs, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to delete obj attrs, obj=" << s->object
+ << " ret=" << op_ret << dendl;
}
+ return;
+}
- if (!verify_permission(binfo, battrs, bowner)) {
- ret = -EACCES;
- goto auth_fail;
+int RGWSetAttrs::verify_permission(optional_yield y)
+{
+ // This looks to be part of the RGW-NFS machinery and has no S3 or
+ // Swift equivalent.
+ bool perm;
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ perm = verify_object_permission_no_policy(this, s, RGW_PERM_WRITE);
+ } else {
+ perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
}
+ if (!perm)
+ return -EACCES;
- if (!path.obj_key.empty()) {
- rgw_obj obj(binfo.bucket, path.obj_key);
- obj_ctx.obj.set_atomic(obj);
+ return 0;
+}
- RGWRados::Object del_target(store, binfo, obj_ctx, obj);
- RGWRados::Object::Delete del_op(&del_target);
+void RGWSetAttrs::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- del_op.params.bucket_owner = binfo.owner;
- del_op.params.versioning_status = binfo.versioning_status();
- del_op.params.obj_owner = bowner;
+void RGWSetAttrs::execute(optional_yield y)
+{
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
- ret = del_op.delete_obj();
- if (ret < 0) {
- goto delop_fail;
- }
+ if (!rgw::sal::Object::empty(s->object.get())) {
+ rgw::sal::Attrs a(attrs);
+ op_ret = s->object->set_obj_attrs(this, s->obj_ctx, &a, nullptr, y);
} else {
- RGWObjVersionTracker ot;
- ot.read_version = binfo.ep_objv;
-
- ret = store->delete_bucket(binfo, ot);
- if (0 == ret) {
- ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
- binfo.bucket.name, false);
- if (ret < 0) {
- ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
- << dendl;
- }
- }
- if (ret < 0) {
- goto delop_fail;
- }
-
- if (!store->get_zonegroup().is_master) {
- bufferlist in_data;
- ret = forward_request_to_master(s, &ot.read_version, store, in_data,
- nullptr);
- if (ret < 0) {
- if (ret == -ENOENT) {
- /* adjust error, we want to return with NoSuchBucket and not
- * NoSuchKey */
- ret = -ERR_NO_SUCH_BUCKET;
- }
- goto delop_fail;
- }
- }
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
}
- num_deleted++;
- return true;
+} /* RGWSetAttrs::execute() */
+void RGWGetObjLayout::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
-binfo_fail:
- if (-ENOENT == ret) {
- ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
- num_unfound++;
- } else {
- ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
- << dendl;
+void RGWGetObjLayout::execute(optional_yield y)
+{
+}
- fail_desc_t failed_item = {
- .err = ret,
- .path = path
- };
- failures.push_back(failed_item);
- }
- return false;
-auth_fail:
- ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
- {
- fail_desc_t failed_item = {
- .err = ret,
- .path = path
- };
- failures.push_back(failed_item);
- }
- return false;
+int RGWConfigBucketMetaSearch::verify_permission(optional_yield y)
+{
+ if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
+ return -EACCES;
+ }
-delop_fail:
- if (-ENOENT == ret) {
- ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
- num_unfound++;
- } else {
- fail_desc_t failed_item = {
- .err = ret,
- .path = path
- };
- failures.push_back(failed_item);
- }
- return false;
+ return 0;
}
-bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
+void RGWConfigBucketMetaSearch::pre_exec()
{
- ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
- for (auto path : paths) {
- ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
- delete_single(path);
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWConfigBucketMetaSearch::execute(optional_yield y)
+{
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
+ return;
}
- return true;
+ s->bucket->get_info().mdsearch_config = mdsearch_config;
+
+ op_ret = s->bucket->put_info(this, false, real_time());
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name()
+ << " returned err=" << op_ret << dendl;
+ return;
+ }
+ s->bucket_attrs = s->bucket->get_attrs();
}
-int RGWBulkDelete::verify_permission()
+int RGWGetBucketMetaSearch::verify_permission(optional_yield y)
{
+ if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
+ return -EACCES;
+ }
+
return 0;
}
-void RGWBulkDelete::pre_exec()
+void RGWGetBucketMetaSearch::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
-void RGWBulkDelete::execute()
+int RGWDelBucketMetaSearch::verify_permission(optional_yield y)
{
- deleter = std::unique_ptr<Deleter>(new Deleter(store, s));
-
- bool is_truncated = false;
- do {
- list<RGWBulkDelete::acct_path_t> items;
-
- int ret = get_data(items, &is_truncated);
- if (ret < 0) {
- return;
- }
-
- ret = deleter->delete_chunk(items);
- } while (!op_ret && is_truncated);
+ if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
+ return -EACCES;
+ }
- return;
+ return 0;
}
+void RGWDelBucketMetaSearch::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
-constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
-
-int RGWBulkUploadOp::verify_permission()
+void RGWDelBucketMetaSearch::execute(optional_yield y)
{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
- }
+ s->bucket->get_info().mdsearch_config.clear();
- if (! verify_user_permission(s, RGW_PERM_WRITE)) {
- return -EACCES;
+ op_ret = s->bucket->put_info(this, false, real_time());
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name()
+ << " returned err=" << op_ret << dendl;
+ return;
}
+ s->bucket_attrs = s->bucket->get_attrs();
+}
- if (s->user->user_id.tenant != s->bucket_tenant) {
- ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
- << " (user_id.tenant=" << s->user->user_id.tenant
- << " requested=" << s->bucket_tenant << ")"
- << dendl;
- return -EACCES;
- }
- if (s->user->max_buckets < 0) {
- return -EPERM;
- }
+RGWHandler::~RGWHandler()
+{
+}
+
+int RGWHandler::init(rgw::sal::Store* _store,
+ struct req_state *_s,
+ rgw::io::BasicClient *cio)
+{
+ store = _store;
+ s = _s;
return 0;
}
-void RGWBulkUploadOp::pre_exec()
+int RGWHandler::do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y)
{
- rgw_bucket_object_pre_exec(s);
+ int ret = rgw_build_bucket_policies(dpp, store, s, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 10) << "init_permissions on " << s->bucket
+ << " failed, ret=" << ret << dendl;
+ return ret==-ENODATA ? -EACCES : ret;
+ }
+
+ rgw_build_iam_environment(store, s);
+ return ret;
}
-boost::optional<std::pair<std::string, rgw_obj_key>>
-RGWBulkUploadOp::parse_path(const boost::string_ref& path)
+int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket, optional_yield y)
{
- /* We need to skip all slashes at the beginning in order to preserve
- * compliance with Swift. */
- const size_t start_pos = path.find_first_not_of('/');
+ if (only_bucket) {
+ /* already read bucket info */
+ return 0;
+ }
+ int ret = rgw_build_object_policies(op, store, s, op->prefetch_data(), y);
- if (boost::string_ref::npos != start_pos) {
- /* Seperator is the first slash after the leading ones. */
- const size_t sep_pos = path.substr(start_pos).find('/');
+ if (ret < 0) {
+ ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":"
+ << s->object << " only_bucket=" << only_bucket
+ << " ret=" << ret << dendl;
+ if (ret == -ENODATA)
+ ret = -EACCES;
+ if (s->auth.identity->is_anonymous() && ret == -EACCES)
+ ret = -EPERM;
+ }
- if (boost::string_ref::npos != sep_pos) {
- const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
- const auto obj_name = path.substr(sep_pos + 1);
+ return ret;
+}
- return std::make_pair(bucket_name.to_string(),
- rgw_obj_key(obj_name.to_string()));
- } else {
- /* It's guaranteed here that bucket name is at least one character
- * long and is different than slash. */
- return std::make_pair(path.substr(start_pos).to_string(),
- rgw_obj_key());
- }
- }
+int RGWOp::error_handler(int err_no, string *error_content, optional_yield y) {
+ return dialect_handler->error_handler(err_no, error_content, y);
+}
- return boost::none;
+int RGWHandler::error_handler(int err_no, string *error_content, optional_yield) {
+ // This is the do-nothing error handler
+ return err_no;
}
-std::pair<std::string, std::string>
-RGWBulkUploadOp::handle_upload_path(struct req_state *s)
+std::ostream& RGWOp::gen_prefix(std::ostream& out) const
{
- std::string bucket_path, file_prefix;
- if (! s->init_state.url_bucket.empty()) {
- file_prefix = bucket_path = s->init_state.url_bucket + "/";
- if (! s->object.empty()) {
- std::string& object_name = s->object.name;
+ // append <dialect>:<op name> to the prefix
+ return s->gen_prefix(out) << s->dialect << ':' << name() << ' ';
+}
- /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
- * we can safely examine its last element. */
- if (object_name.back() == '/') {
- file_prefix.append(object_name);
- } else {
- file_prefix.append(object_name).append("/");
- }
- }
+void RGWDefaultResponseOp::send_response() {
+ if (op_ret) {
+ set_req_state_err(s, op_ret);
}
- return std::make_pair(bucket_path, file_prefix);
+ dump_errno(s);
+ end_header(s);
}
-int RGWBulkUploadOp::handle_dir_verify_permission()
+void RGWPutBucketPolicy::send_response()
{
- if (s->user->max_buckets > 0) {
- RGWUserBuckets buckets;
- std::string marker;
- bool is_truncated = false;
- op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
- marker, std::string(), s->user->max_buckets,
- false, &is_truncated);
- if (op_ret < 0) {
- return op_ret;
- }
+ if (!op_ret) {
+    /* A successful PutBucketPolicy request is answered with 204 No Content. */
+ op_ret = STATUS_NO_CONTENT;
+ }
+ if (op_ret) {
+ set_req_state_err(s, op_ret);
+ }
+ dump_errno(s);
+ end_header(s);
+}
- if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
- return -ERR_TOO_MANY_BUCKETS;
- }
+int RGWPutBucketPolicy::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPolicy)) {
+ return -EACCES;
}
return 0;
}
-static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
+int RGWPutBucketPolicy::get_params(optional_yield y)
{
- /* the request of container or object level will contain bucket name.
- * only at account level need to append the bucket name */
- if (info.script_uri.find(bucket_name) != std::string::npos) {
- return;
- }
+ const auto max_size = s->cct->_conf->rgw_max_put_param_size;
+ // At some point when I have more time I want to make a version of
+ // rgw_rest_read_all_input that doesn't use malloc.
+ std::tie(op_ret, data) = read_all_input(s, max_size, false);
- ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
- info.script_uri.append("/").append(bucket_name);
- info.request_uri_aws4 = info.request_uri = info.script_uri;
- info.effective_uri = "/" + bucket_name;
+ // And throws exceptions.
+ return op_ret;
}
-int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
+void RGWPutBucketPolicy::execute(optional_yield y)
{
- ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ return;
+ }
- op_ret = handle_dir_verify_permission();
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
- return op_ret;
+ ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
}
- std::string bucket_name;
- rgw_obj_key object_junk;
- std::tie(bucket_name, object_junk) = *parse_path(path);
+ try {
+ const Policy p(s->cct, s->bucket_tenant, data);
+ rgw::sal::Attrs attrs(s->bucket_attrs);
+ if (s->bucket_access_conf &&
+ s->bucket_access_conf->block_public_policy() &&
+ rgw::IAM::is_public(p)) {
+ op_ret = -EACCES;
+ return;
+ }
- rgw_raw_obj obj(store->get_zone_params().domain_root,
- rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [&p, this, &attrs] {
+ attrs[RGW_ATTR_IAM_POLICY].clear();
+ attrs[RGW_ATTR_IAM_POLICY].append(p.text);
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ return op_ret;
+ });
+ } catch (rgw::IAM::PolicyParseException& e) {
+ ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
+ op_ret = -EINVAL;
+ }
+}
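+/* Explanatory note: roughly speaking, retry_raced_bucket_write() (defined
+ * earlier in this file) re-runs the supplied lambda when it returns
+ * -ECANCELED, refreshing the cached bucket info between attempts, so a racing
+ * bucket-metadata writer does not make the policy update fail spuriously. */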
- /* we need to make sure we read bucket info, it's not read before for this
- * specific request */
- RGWBucketInfo binfo;
- std::map<std::string, ceph::bufferlist> battrs;
- op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
- binfo, NULL, &battrs);
- if (op_ret < 0 && op_ret != -ENOENT) {
- return op_ret;
+void RGWGetBucketPolicy::send_response()
+{
+ if (op_ret) {
+ set_req_state_err(s, op_ret);
}
- const bool bucket_exists = (op_ret != -ENOENT);
+ dump_errno(s);
+ end_header(s, this, "application/json");
+ dump_body(s, policy);
+}
- if (bucket_exists) {
- RGWAccessControlPolicy old_policy(s->cct);
- int r = get_bucket_policy_from_attr(s->cct, store, binfo,
- battrs, &old_policy);
- if (r >= 0) {
- if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
- op_ret = -EEXIST;
- return op_ret;
- }
- }
+int RGWGetBucketPolicy::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
+ return -EACCES;
}
- RGWBucketInfo master_info;
- rgw_bucket *pmaster_bucket = nullptr;
- uint32_t *pmaster_num_shards = nullptr;
- real_time creation_time;
- obj_version objv, ep_objv, *pobjv = nullptr;
+ return 0;
+}
- if (! store->is_meta_master()) {
- JSONParser jp;
- ceph::bufferlist in_data;
- req_info info = s->info;
- forward_req_info(s->cct, info, bucket_name);
- op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
- if (op_ret < 0) {
- return op_ret;
+void RGWGetBucketPolicy::execute(optional_yield y)
+{
+ rgw::sal::Attrs attrs(s->bucket_attrs);
+ auto aiter = attrs.find(RGW_ATTR_IAM_POLICY);
+ if (aiter == attrs.end()) {
+ ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
+ << s->bucket_name << dendl;
+ op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
+ s->err.message = "The bucket policy does not exist";
+ return;
+ } else {
+ policy = attrs[RGW_ATTR_IAM_POLICY];
+
+ if (policy.length() == 0) {
+ ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: "
+ << s->bucket_name << dendl;
+ op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
+ s->err.message = "The bucket policy does not exist";
+ return;
}
+ }
+}
- JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
- JSONDecoder::decode_json("object_ver", objv, &jp);
- JSONDecoder::decode_json("bucket_info", master_info, &jp);
+void RGWDeleteBucketPolicy::send_response()
+{
+ if (op_ret) {
+ set_req_state_err(s, op_ret);
+ }
+ dump_errno(s);
+ end_header(s);
+}
- ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
- << objv.ver << dendl;
- ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
- << dendl;
+int RGWDeleteBucketPolicy::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- pmaster_bucket= &master_info.bucket;
- creation_time = master_info.creation_time;
- pmaster_num_shards = &master_info.num_shards;
- pobjv = &objv;
- } else {
- pmaster_bucket = nullptr;
- pmaster_num_shards = nullptr;
- }
-
-
- std::string placement_rule;
- if (bucket_exists) {
- std::string selected_placement_rule;
- rgw_bucket bucket;
- bucket.tenant = s->bucket_tenant;
- bucket.name = s->bucket_name;
- op_ret = store->select_bucket_placement(*(s->user),
- store->get_zonegroup().get_id(),
- placement_rule,
- &selected_placement_rule,
- nullptr);
- if (selected_placement_rule != binfo.placement_rule) {
- op_ret = -EEXIST;
- ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
- return op_ret;
- }
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucketPolicy)) {
+ return -EACCES;
}
- /* Create metadata: ACLs. */
- std::map<std::string, ceph::bufferlist> attrs;
- RGWAccessControlPolicy policy;
- policy.create_default(s->user->user_id, s->user->display_name);
- ceph::bufferlist aclbl;
- policy.encode(aclbl);
- attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
+ return 0;
+}
- RGWQuotaInfo quota_info;
- const RGWQuotaInfo * pquota_info = nullptr;
+void RGWDeleteBucketPolicy::execute(optional_yield y)
+{
+ bufferlist data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
- rgw_bucket bucket;
- bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
- bucket.name = bucket_name;
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ rgw::sal::Attrs attrs(s->bucket_attrs);
+ attrs.erase(RGW_ATTR_IAM_POLICY);
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ return op_ret;
+ });
+}
+void RGWPutBucketObjectLock::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- RGWBucketInfo out_info;
- op_ret = store->create_bucket(*(s->user),
- bucket,
- store->get_zonegroup().get_id(),
- placement_rule, binfo.swift_ver_location,
- pquota_info, attrs,
- out_info, pobjv, &ep_objv, creation_time,
- pmaster_bucket, pmaster_num_shards, true);
- /* continue if EEXIST and create_bucket will fail below. this way we can
- * recover from a partial create by retrying it. */
- ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
- << ", bucket=" << bucket << dendl;
+int RGWPutBucketObjectLock::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- if (op_ret && op_ret != -EEXIST) {
- return op_ret;
- }
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketObjectLockConfiguration);
+}
- const bool existed = (op_ret == -EEXIST);
- if (existed) {
- /* bucket already existed, might have raced with another bucket creation, or
- * might be partial bucket creation that never completed. Read existing bucket
- * info, verify that the reported bucket owner is the current user.
- * If all is ok then update the user's list of buckets.
- * Otherwise inform client about a name conflict.
- */
- if (out_info.owner.compare(s->user->user_id) != 0) {
- op_ret = -EEXIST;
- ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
- return op_ret;
- }
- bucket = out_info.bucket;
+void RGWPutBucketObjectLock::execute(optional_yield y)
+{
+ if (!s->bucket->get_info().obj_lock_enabled()) {
+ s->err.message = "object lock configuration can't be set if bucket object lock not enabled";
+ ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
+ op_ret = -ERR_INVALID_BUCKET_STATE;
+ return;
}
- op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
- out_info.creation_time, false);
- if (op_ret && !existed && op_ret != -EEXIST) {
- /* if it exists (or previously existed), don't remove it! */
- op_ret = rgw_unlink_bucket(store, s->user->user_id,
- bucket.tenant, bucket.name);
- if (op_ret < 0) {
- ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
- << op_ret << dendl;
- }
- } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
- ldout(s->cct, 20) << "bulk upload: containers already exists"
- << dendl;
- op_ret = -ERR_BUCKET_EXISTS;
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ return;
+ }
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ op_ret = -ERR_MALFORMED_XML;
+ return;
}
- return op_ret;
-}
-
+ try {
+ RGWXMLDecoder::decode_xml("ObjectLockConfiguration", obj_lock, &parser, true);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+ if (obj_lock.has_rule() && !obj_lock.retention_period_valid()) {
+ s->err.message = "retention period must be a positive integer value";
+ ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
+ op_ret = -ERR_INVALID_RETENTION_PERIOD;
+ return;
+ }
-bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
- std::map<std::string, ceph::bufferlist>& battrs,
- ACLOwner& bucket_owner /* out */)
-{
- RGWAccessControlPolicy bacl(store->ctx());
- op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
- << dendl;
- return false;
+ ldpp_dout(this, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
}
- bucket_owner = bacl.get_owner();
- return verify_bucket_permission(s, s->user_acl.get(), &bacl, RGW_PERM_WRITE);
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ s->bucket->get_info().obj_lock = obj_lock;
+ op_ret = s->bucket->put_info(this, false, real_time());
+ return op_ret;
+ });
+ return;
}
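+/* For illustration only: the body decoded above follows the S3 object-lock
+ * schema, roughly
+ *
+ *   <ObjectLockConfiguration>
+ *     <ObjectLockEnabled>Enabled</ObjectLockEnabled>
+ *     <Rule>
+ *       <DefaultRetention>
+ *         <Mode>GOVERNANCE</Mode>
+ *         <Days>30</Days>
+ *       </DefaultRetention>
+ *     </Rule>
+ *   </ObjectLockConfiguration>
+ */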
-int RGWBulkUploadOp::handle_file(const boost::string_ref path,
- const size_t size,
- AlignedStreamGetter& body)
+void RGWGetBucketObjectLock::pre_exec()
{
+ rgw_bucket_object_pre_exec(s);
+}
- ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
- << dendl;
+int RGWGetBucketObjectLock::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
- RGWPutObjDataProcessor *filter = nullptr;
- boost::optional<RGWPutObj_Compress> compressor;
+ return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketObjectLockConfiguration);
+}
- if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) {
- op_ret = -ERR_TOO_LARGE;
- return op_ret;
+void RGWGetBucketObjectLock::execute(optional_yield y)
+{
+ if (!s->bucket->get_info().obj_lock_enabled()) {
+ op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
+ return;
}
+}
- std::string bucket_name;
- rgw_obj_key object;
- std::tie(bucket_name, object) = *parse_path(path);
+int RGWPutObjRetention::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
- RGWBucketInfo binfo;
- std::map<std::string, ceph::bufferlist> battrs;
- ACLOwner bowner;
- op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
- bucket_name, binfo, nullptr, &battrs);
- if (op_ret == -ENOENT) {
- ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
- << dendl;
- } else if (op_ret < 0) {
- return op_ret;
+ if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectRetention)) {
+ return -EACCES;
}
-
- if (! handle_file_verify_permission(binfo, battrs, bowner)) {
- ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
- op_ret = -EACCES;
+ op_ret = get_params(y);
+ if (op_ret) {
return op_ret;
}
-
- op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
- user_quota, bucket_quota, size);
- if (op_ret < 0) {
- return op_ret;
+ if (bypass_governance_mode) {
+ bypass_perm = verify_object_permission(this, s, rgw::IAM::s3BypassGovernanceRetention);
}
+ return 0;
+}
- RGWPutObjProcessor_Atomic processor(obj_ctx,
- binfo,
- binfo.bucket,
- object.name,
- /* part size */
- s->cct->_conf->rgw_obj_stripe_size,
- s->req_id,
- binfo.versioning_enabled());
-
- /* No filters by default. */
- filter = &processor;
+void RGWPutObjRetention::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- op_ret = processor.prepare(store, nullptr);
- if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
- << op_ret << dendl;
- return op_ret;
+void RGWPutObjRetention::execute(optional_yield y)
+{
+ if (!s->bucket->get_info().obj_lock_enabled()) {
+ s->err.message = "object retention can't be set if bucket object lock not configured";
+ ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
+ op_ret = -ERR_INVALID_REQUEST;
+ return;
}
- const auto& compression_type = store->get_zone_params().get_compression_type(
- binfo.placement_rule);
- CompressorRef plugin;
- if (compression_type != "none") {
- plugin = Compressor::create(s->cct, compression_type);
- if (! plugin) {
- ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
- << compression_type << dendl;
- } else {
- compressor.emplace(s->cct, plugin, filter);
- filter = &*compressor;
- }
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
+ op_ret = -EINVAL;
+ return;
}
- /* Upload file content. */
- ssize_t len = 0;
- size_t ofs = 0;
- MD5 hash;
- do {
- ceph::bufferlist data;
- len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);
-
- ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
- if (len < 0) {
- op_ret = len;
- return op_ret;
- } else if (len > 0) {
- hash.Update((const byte *)data.c_str(), data.length());
- op_ret = put_data_and_throttle(filter, data, ofs, false);
- if (op_ret < 0) {
- ldout(s->cct, 20) << "processor->thottle_data() returned ret="
- << op_ret << dendl;
- return op_ret;
- }
-
- ofs += len;
- }
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
- } while (len > 0);
+ try {
+ RGWXMLDecoder::decode_xml("Retention", obj_retention, &parser, true);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
- if (ofs != size) {
- ldout(s->cct, 10) << "bulk upload: real file size different from declared"
- << dendl;
+ if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph_clock_now()) {
+ s->err.message = "the retain-until date must be in the future";
+ ldpp_dout(this, 0) << "ERROR: " << s->err.message << dendl;
op_ret = -EINVAL;
+ return;
}
+ bufferlist bl;
+ obj_retention.encode(bl);
- op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
- user_quota, bucket_quota, size);
+ //check old retention
+ op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this);
if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
- << dendl;
- return op_ret;
+ ldpp_dout(this, 0) << "ERROR: get obj attr error"<< dendl;
+ return;
+ }
+ rgw::sal::Attrs attrs = s->object->get_attrs();
+ auto aiter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
+ if (aiter != attrs.end()) {
+ RGWObjectRetention old_obj_retention;
+ try {
+ decode(old_obj_retention, aiter->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode RGWObjectRetention" << dendl;
+ op_ret = -EIO;
+ return;
+ }
+ if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph::real_clock::to_time_t(old_obj_retention.get_retain_until_date())) {
+ if (old_obj_retention.get_mode().compare("GOVERNANCE") != 0 || !bypass_perm || !bypass_governance_mode) {
+ s->err.message = "proposed retain-until date shortens an existing retention period and governance bypass check failed";
+ op_ret = -EACCES;
+ return;
+ }
+ } else if (old_obj_retention.get_mode() == obj_retention.get_mode()) {
+ // ok if retention mode doesn't change
+ } else if (obj_retention.get_mode() == "GOVERNANCE") {
+ s->err.message = "can't change retention mode from COMPLIANCE to GOVERNANCE";
+ op_ret = -EACCES;
+ return;
+ } else if (!bypass_perm || !bypass_governance_mode) {
+ s->err.message = "can't change retention mode from GOVERNANCE without governance bypass";
+ op_ret = -EACCES;
+ return;
+ }
}
- char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
- unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
- hash.Final(m);
- buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
+ op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_OBJECT_RETENTION, bl, s->yield, this);
- /* Create metadata: ETAG. */
- std::map<std::string, ceph::bufferlist> attrs;
- std::string etag = calc_md5;
- ceph::bufferlist etag_bl;
- etag_bl.append(etag.c_str(), etag.size() + 1);
- attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));
+ return;
+}
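+/* For illustration only: the Retention document parsed above looks roughly
+ * like
+ *
+ *   <Retention>
+ *     <Mode>GOVERNANCE</Mode>
+ *     <RetainUntilDate>2030-01-01T00:00:00Z</RetainUntilDate>
+ *   </Retention>
+ *
+ * Shortening an existing GOVERNANCE period requires both the bypass-governance
+ * request flag and the s3BypassGovernanceRetention permission, as checked
+ * above. */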
- /* Create metadata: ACLs. */
- RGWAccessControlPolicy policy;
- policy.create_default(s->user->user_id, s->user->display_name);
- ceph::bufferlist aclbl;
- policy.encode(aclbl);
- attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
+int RGWGetObjRetention::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- /* Create metadata: compression info. */
- if (compressor && compressor->is_compressed()) {
- ceph::bufferlist tmp;
- RGWCompressionInfo cs_info;
- cs_info.compression_type = plugin->get_type_name();
- cs_info.orig_size = s->obj_size;
- cs_info.blocks = std::move(compressor->get_compression_blocks());
- ::encode(cs_info, tmp);
- attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
+ if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention)) {
+ return -EACCES;
}
+ return 0;
+}
- /* Complete the transaction. */
- op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
- ceph::real_time() /* delete_at */);
+void RGWGetObjRetention::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWGetObjRetention::execute(optional_yield y)
+{
+ if (!s->bucket->get_info().obj_lock_enabled()) {
+ s->err.message = "bucket object lock not configured";
+ ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
+ op_ret = -ERR_INVALID_REQUEST;
+ return;
+ }
+ op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this);
if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
- << op_ret << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object
+ << " ret=" << op_ret << dendl;
+ return;
+ }
+ rgw::sal::Attrs attrs = s->object->get_attrs();
+ auto aiter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
+ if (aiter == attrs.end()) {
+ op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
+ return;
}
- return op_ret;
+ bufferlist::const_iterator iter{&aiter->second};
+ try {
+ obj_retention.decode(iter);
+ } catch (const buffer::error& e) {
+ ldpp_dout(this, 0) << __func__ << "decode object retention config failed" << dendl;
+ op_ret = -EIO;
+ return;
+ }
+ return;
}
-void RGWBulkUploadOp::execute()
+int RGWPutObjLegalHold::verify_permission(optional_yield y)
{
- ceph::bufferlist buffer(64 * 1024);
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- ldout(s->cct, 20) << "bulk upload: start" << dendl;
+ if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectLegalHold)) {
+ return -EACCES;
+ }
+ return 0;
+}
- /* Create an instance of stream-abstracting class. Having this indirection
- * allows for easy introduction of decompressors like gzip and bzip2. */
- auto stream = create_stream();
- if (! stream) {
+void RGWPutObjLegalHold::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWPutObjLegalHold::execute(optional_yield y) {
+ if (!s->bucket->get_info().obj_lock_enabled()) {
+ s->err.message = "object legal hold can't be set if bucket object lock not enabled";
+ ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
+ op_ret = -ERR_INVALID_REQUEST;
return;
}
- /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
- * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
- std::string bucket_path, file_prefix;
- std::tie(bucket_path, file_prefix) = handle_upload_path(s);
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
- auto status = rgw::tar::StatusIndicator::create();
- do {
- op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
- if (op_ret < 0) {
- ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
- return;
- }
+ op_ret = get_params(y);
+ if (op_ret < 0)
+ return;
- /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
- * must be tracked to detect out end-of-archive. It occurs when both of
- * them are empty (zeroed). Tracing this particular inter-block dependency
- * is responsibility of the rgw::tar::StatusIndicator class. */
- boost::optional<rgw::tar::HeaderView> header;
- std::tie(status, header) = rgw::tar::interpret_block(status, buffer);
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
- if (! status.empty() && header) {
- /* This specific block isn't empty (entirely zeroed), so we can parse
- * it as a TAR header and dispatch. At the moment we do support only
- * regular files and directories. Everything else (symlinks, devices)
- * will be ignored but won't cease the whole upload. */
- switch (header->get_filetype()) {
- case rgw::tar::FileType::NORMAL_FILE: {
- ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;
+ try {
+ RGWXMLDecoder::decode_xml("LegalHold", obj_legal_hold, &parser, true);
+ } catch (RGWXMLDecoder::err &err) {
+ ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+ bufferlist bl;
+ obj_legal_hold.encode(bl);
+  // if the object instance is empty, modify the latest object
+ op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_OBJECT_LEGAL_HOLD, bl, s->yield, this);
+ return;
+}
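+/* For illustration only: the LegalHold body decoded above has the form
+ *   <LegalHold><Status>ON</Status></LegalHold>
+ * with Status set to either ON or OFF. */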
- boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
- file_prefix + header->get_filename().to_string();
- auto body = AlignedStreamGetter(0, header->get_filesize(),
- rgw::tar::BLOCK_SIZE, *stream);
- op_ret = handle_file(filename,
- header->get_filesize(),
- body);
- if (! op_ret) {
- /* Only regular files counts. */
- num_created++;
- } else {
- failures.emplace_back(op_ret, filename.to_string());
- }
- break;
- }
- case rgw::tar::FileType::DIRECTORY: {
- ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;
+int RGWGetObjLegalHold::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
- op_ret = handle_dir(dirname);
- if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
- failures.emplace_back(op_ret, dirname.to_string());
- }
- break;
- }
- default: {
- /* Not recognized. Skip. */
- op_ret = 0;
- break;
- }
- }
+ if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold)) {
+ return -EACCES;
+ }
+ return 0;
+}
- /* In case of any problems with sub-request authorization Swift simply
- * terminates whole upload immediately. */
- if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
- terminal_errors)) {
- ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
- << dendl;
- break;
- }
- } else {
- ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
- op_ret = 0;
- }
+void RGWGetObjLegalHold::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
- buffer.clear();
- } while (! status.eof());
+void RGWGetObjLegalHold::execute(optional_yield y)
+{
+ if (!s->bucket->get_info().obj_lock_enabled()) {
+ s->err.message = "bucket object lock not configured";
+ ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
+ op_ret = -ERR_INVALID_REQUEST;
+ return;
+ }
+ map<string, bufferlist> attrs;
+ op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object
+ << " ret=" << op_ret << dendl;
+ return;
+ }
+ auto aiter = s->object->get_attrs().find(RGW_ATTR_OBJECT_LEGAL_HOLD);
+ if (aiter == s->object->get_attrs().end()) {
+ op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
+ return;
+ }
+ bufferlist::const_iterator iter{&aiter->second};
+ try {
+ obj_legal_hold.decode(iter);
+ } catch (const buffer::error& e) {
+ ldpp_dout(this, 0) << __func__ << "decode object legal hold config failed" << dendl;
+ op_ret = -EIO;
+ return;
+ }
return;
}
-RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
+void RGWGetClusterStat::execute(optional_yield y)
{
- const size_t aligned_legnth = length + (-length % alignment);
- ceph::bufferlist junk;
-
- DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
+ op_ret = store->cluster_stat(stats_op);
}
-ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
- ceph::bufferlist& dst)
+int RGWGetBucketPolicyStatus::verify_permission(optional_yield y)
{
- const size_t max_to_read = std::min(want, length - position);
- const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
- if (len > 0) {
- position += len;
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicyStatus)) {
+ return -EACCES;
}
- return len;
+
+ return 0;
}
-ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
- ceph::bufferlist& dst)
+void RGWGetBucketPolicyStatus::execute(optional_yield y)
{
- const auto len = DecoratedStreamGetter::get_exactly(want, dst);
- if (len > 0) {
- position += len;
- }
- return len;
+ isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public(this);
}
-int RGWSetAttrs::verify_permission()
+int RGWPutBucketPublicAccessBlock::verify_permission(optional_yield y)
{
- bool perm;
- if (!s->object.empty()) {
- perm = verify_object_permission(s, RGW_PERM_WRITE);
- } else {
- perm = verify_bucket_permission(s, RGW_PERM_WRITE);
- }
- if (!perm)
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) {
return -EACCES;
+ }
return 0;
}
-void RGWSetAttrs::pre_exec()
+int RGWPutBucketPublicAccessBlock::get_params(optional_yield y)
{
- rgw_bucket_object_pre_exec(s);
+ const auto max_size = s->cct->_conf->rgw_max_put_param_size;
+ std::tie(op_ret, data) = read_all_input(s, max_size, false);
+ return op_ret;
}
-void RGWSetAttrs::execute()
+void RGWPutBucketPublicAccessBlock::execute(optional_yield y)
{
- op_ret = get_params();
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ op_ret = get_params(y);
if (op_ret < 0)
return;
- rgw_obj obj(s->bucket, s->object);
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ ldpp_dout(this, 0) << "ERROR: malformed XML" << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+
+ try {
+ RGWXMLDecoder::decode_xml("PublicAccessBlockConfiguration", access_conf, &parser, true);
+ } catch (RGWXMLDecoder::err &err) {
+ ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ bufferlist bl;
+ access_conf.encode(bl);
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, &bl] {
+ rgw::sal::Attrs attrs(s->bucket_attrs);
+ attrs[RGW_ATTR_PUBLIC_ACCESS] = bl;
+ return s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ });
+
+}
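+/* For illustration only: the PublicAccessBlockConfiguration parsed above
+ * typically looks like
+ *
+ *   <PublicAccessBlockConfiguration>
+ *     <BlockPublicAcls>true</BlockPublicAcls>
+ *     <IgnorePublicAcls>true</IgnorePublicAcls>
+ *     <BlockPublicPolicy>true</BlockPublicPolicy>
+ *     <RestrictPublicBuckets>true</RestrictPublicBuckets>
+ *   </PublicAccessBlockConfiguration>
+ *
+ * BlockPublicPolicy is the flag consulted by RGWPutBucketPolicy via
+ * s->bucket_access_conf->block_public_policy(). */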
+
+int RGWGetBucketPublicAccessBlock::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
+ return -EACCES;
+ }
- store->set_atomic(s->obj_ctx, obj);
+ return 0;
+}
- if (!s->object.empty()) {
- op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
+void RGWGetBucketPublicAccessBlock::execute(optional_yield y)
+{
+ auto attrs = s->bucket_attrs;
+ if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS);
+ aiter == attrs.end()) {
+    ldpp_dout(this, 0) << "can't find bucket public access block attr, bucket_name = "
+ << s->bucket_name << dendl;
+ // return the default;
+ return;
} else {
- for (auto& iter : attrs) {
- s->bucket_attrs[iter.first] = std::move(iter.second);
+ bufferlist::const_iterator iter{&aiter->second};
+ try {
+ access_conf.decode(iter);
+ } catch (const buffer::error& e) {
+ ldpp_dout(this, 0) << __func__ << "decode access_conf failed" << dendl;
+ op_ret = -EIO;
+ return;
}
- op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
- &s->bucket_info.objv_tracker);
}
}
-void RGWGetObjLayout::pre_exec()
+
+void RGWDeleteBucketPublicAccessBlock::send_response()
{
- rgw_bucket_object_pre_exec(s);
+ if (op_ret) {
+ set_req_state_err(s, op_ret);
+ }
+ dump_errno(s);
+ end_header(s);
}
-void RGWGetObjLayout::execute()
+int RGWDeleteBucketPublicAccessBlock::verify_permission(optional_yield y)
{
- rgw_obj obj(s->bucket, s->object);
- RGWRados::Object target(store,
- s->bucket_info,
- *static_cast<RGWObjectCtx *>(s->obj_ctx),
- rgw_obj(s->bucket, s->object));
- RGWRados::Object::Read stat_op(&target);
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) {
+ return -EACCES;
+ }
+
+ return 0;
+}
- op_ret = stat_op.prepare();
+void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y)
+{
+ bufferlist data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- head_obj = stat_op.state.head_obj;
-
- op_ret = target.get_manifest(&manifest);
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
+ rgw::sal::Attrs attrs(s->bucket_attrs);
+ attrs.erase(RGW_ATTR_PUBLIC_ACCESS);
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ return op_ret;
+ });
}
-
-RGWHandler::~RGWHandler()
+int RGWPutBucketEncryption::get_params(optional_yield y)
{
+ const auto max_size = s->cct->_conf->rgw_max_put_param_size;
+ std::tie(op_ret, data) = read_all_input(s, max_size, false);
+ return op_ret;
}
-int RGWHandler::init(RGWRados *_store,
- struct req_state *_s,
- rgw::io::BasicClient *cio)
+int RGWPutBucketEncryption::verify_permission(optional_yield y)
{
- store = _store;
- s = _s;
-
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketEncryption)) {
+ return -EACCES;
+ }
return 0;
}
-int RGWHandler::do_init_permissions()
+void RGWPutBucketEncryption::execute(optional_yield y)
{
- int ret = rgw_build_bucket_policies(store, s);
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ op_ret = get_params(y);
+ if (op_ret < 0) {
+ return;
+ }
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ ldpp_dout(this, 0) << "ERROR: malformed XML" << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
- if (ret < 0) {
- ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl;
- if (ret == -ENODATA)
- ret = -EACCES;
+ try {
+ RGWXMLDecoder::decode_xml("ServerSideEncryptionConfiguration", bucket_encryption_conf, &parser, true);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
}
- return ret;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ bufferlist conf_bl;
+ bucket_encryption_conf.encode(conf_bl);
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y, &conf_bl] {
+ rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ attrs[RGW_ATTR_BUCKET_ENCRYPTION_POLICY] = conf_bl;
+ return s->bucket->merge_and_store_attrs(this, attrs, y);
+ });
}
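+/* For illustration only: the ServerSideEncryptionConfiguration decoded above
+ * follows the S3 schema, e.g.
+ *
+ *   <ServerSideEncryptionConfiguration>
+ *     <Rule>
+ *       <ApplyServerSideEncryptionByDefault>
+ *         <SSEAlgorithm>AES256</SSEAlgorithm>
+ *       </ApplyServerSideEncryptionByDefault>
+ *     </Rule>
+ *   </ServerSideEncryptionConfiguration>
+ */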
-int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
+int RGWGetBucketEncryption::verify_permission(optional_yield y)
{
- if (only_bucket) {
- /* already read bucket info */
- return 0;
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketEncryption)) {
+ return -EACCES;
}
- int ret = rgw_build_object_policies(store, s, op->prefetch_data());
+ return 0;
+}
- if (ret < 0) {
- ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
- << s->object << " only_bucket=" << only_bucket
- << " ret=" << ret << dendl;
- if (ret == -ENODATA)
- ret = -EACCES;
+void RGWGetBucketEncryption::execute(optional_yield y)
+{
+ const auto& attrs = s->bucket_attrs;
+ if (auto aiter = attrs.find(RGW_ATTR_BUCKET_ENCRYPTION_POLICY);
+ aiter == attrs.end()) {
+ ldpp_dout(this, 0) << "can't find BUCKET ENCRYPTION attr for bucket_name = " << s->bucket_name << dendl;
+ op_ret = -ENOENT;
+ s->err.message = "The server side encryption configuration was not found";
+ return;
+ } else {
+ bufferlist::const_iterator iter{&aiter->second};
+ try {
+ bucket_encryption_conf.decode(iter);
+ } catch (const buffer::error& e) {
+ ldpp_dout(this, 0) << __func__ << "decode bucket_encryption_conf failed" << dendl;
+ op_ret = -EIO;
+ return;
+ }
}
-
- return ret;
}
-int RGWOp::error_handler(int err_no, string *error_content) {
- return dialect_handler->error_handler(err_no, error_content);
+int RGWDeleteBucketEncryption::verify_permission(optional_yield y)
+{
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketEncryption)) {
+ return -EACCES;
+ }
+ return 0;
}
-int RGWHandler::error_handler(int err_no, string *error_content) {
- // This is the do-nothing error handler
- return err_no;
+void RGWDeleteBucketEncryption::execute(optional_yield y)
+{
+ bufferlist data;
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
+ rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_POLICY);
+ attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_KEY_ID);
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ return op_ret;
+ });
}
+
+void rgw_slo_entry::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("path", path, obj);
+ JSONDecoder::decode_json("etag", etag, obj);
+ JSONDecoder::decode_json("size_bytes", size_bytes, obj);
+};
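+/* For illustration only: a Swift SLO manifest entry decoded by the function
+ * above looks roughly like
+ *   {"path": "/container/segment-001", "etag": "<md5>", "size_bytes": 1048576}
+ */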
+