]>
Commit | Line | Data |
---|---|---|
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- | |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
4 | #include <errno.h> | |
5 | #include <stdlib.h> | |
6 | #include <system_error> | |
7 | #include <unistd.h> | |
8 | ||
9 | #include <sstream> | |
10 | ||
11 | #include <boost/algorithm/string/predicate.hpp> | |
12 | #include <boost/bind.hpp> | |
13 | #include <boost/optional.hpp> | |
14 | #include <boost/utility/in_place_factory.hpp> | |
15 | #include <boost/utility/string_view.hpp> | |
16 | ||
17 | #include "common/Clock.h" | |
18 | #include "common/armor.h" | |
19 | #include "common/backport14.h" | |
20 | #include "common/errno.h" | |
21 | #include "common/mime.h" | |
22 | #include "common/utf8.h" | |
23 | #include "common/ceph_json.h" | |
24 | ||
25 | #include "rgw_rados.h" | |
26 | #include "rgw_op.h" | |
27 | #include "rgw_rest.h" | |
28 | #include "rgw_acl.h" | |
29 | #include "rgw_acl_s3.h" | |
30 | #include "rgw_acl_swift.h" | |
31 | #include "rgw_user.h" | |
32 | #include "rgw_bucket.h" | |
33 | #include "rgw_log.h" | |
34 | #include "rgw_multi.h" | |
35 | #include "rgw_multi_del.h" | |
36 | #include "rgw_cors.h" | |
37 | #include "rgw_cors_s3.h" | |
38 | #include "rgw_rest_conn.h" | |
39 | #include "rgw_rest_s3.h" | |
40 | #include "rgw_tar.h" | |
41 | #include "rgw_client_io.h" | |
42 | #include "rgw_compression.h" | |
43 | #include "rgw_role.h" | |
44 | #include "rgw_tag_s3.h" | |
45 | #include "cls/lock/cls_lock_client.h" | |
46 | #include "cls/rgw/cls_rgw_client.h" | |
47 | ||
48 | ||
49 | #include "include/assert.h" | |
50 | ||
51 | #include "compressor/Compressor.h" | |
52 | ||
53 | #include "rgw_acl_swift.h" | |
54 | ||
55 | #define dout_context g_ceph_context | |
56 | #define dout_subsys ceph_subsys_rgw | |
57 | ||
58 | using namespace std; | |
59 | using namespace librados; | |
60 | using ceph::crypto::MD5; | |
61 | using boost::optional; | |
62 | using boost::none; | |
63 | ||
64 | using rgw::IAM::ARN; | |
65 | using rgw::IAM::Effect; | |
66 | using rgw::IAM::Policy; | |
67 | ||
68 | using rgw::IAM::Policy; | |
69 | ||
70 | static string mp_ns = RGW_OBJ_NS_MULTIPART; | |
71 | static string shadow_ns = RGW_OBJ_NS_SHADOW; | |
72 | ||
73 | static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name); | |
74 | static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store, | |
75 | bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr); | |
76 | ||
77 | static MultipartMetaFilter mp_filter; | |
78 | ||
/*
 * Parse an HTTP Range header value such as "bytes=ofs-end".
 *
 * Results on success (returns 0 with *partial_content == true):
 *  - "bytes=A-B" -> ofs=A, end=B
 *  - "bytes=A-"  -> ofs=A, end left untouched (open-ended)
 *  - "bytes=-N"  -> ofs=-N, end=-1  (RFC 2616 suffix-byte-range-spec)
 * A header whose range unit is not "bytes" is ignored: returns 0 with
 * *partial_content == false. A malformed or unsatisfiable byte range
 * (no '-', negative end, end < ofs) returns -ERANGE.
 *
 * Fixes vs. the previous version: the case-insensitive fallback compared
 * the unit from the start of the string instead of from 'pos' (so any
 * leading whitespace broke it), and it never checked the unit's length
 * (so a truncated unit like "by=0-1" was accepted as a byte range).
 */
static int parse_range(const char *range, off_t& ofs, off_t& end, bool *partial_content)
{
  string s(range);

  *partial_content = false;

  size_t pos = s.find("bytes=");
  if (pos == string::npos) {
    /* tolerate case variations and whitespace around '=', e.g.
     * "Bytes = 0-99"; operator[] at size() safely yields '\0' */
    pos = 0;
    while (isspace((unsigned char)s[pos]))
      pos++;
    size_t unit_end = pos;
    while (isalpha((unsigned char)s[unit_end]))
      unit_end++;
    /* the unit must be exactly "bytes" (case-insensitive); any other
     * range unit is not understood, so the header is ignored */
    if (unit_end - pos != 5 ||
        strncasecmp(s.c_str() + pos, "bytes", 5) != 0)
      return 0;
    while (isspace((unsigned char)s[unit_end]))
      unit_end++;
    if (s[unit_end] != '=')
      return 0;
    s = s.substr(unit_end + 1);
  } else {
    s = s.substr(pos + 6); /* strlen("bytes=") */
  }

  pos = s.find('-');
  if (pos == string::npos)
    return -ERANGE;

  *partial_content = true;

  string ofs_str = s.substr(0, pos);
  string end_str = s.substr(pos + 1);
  if (end_str.length()) {
    end = atoll(end_str.c_str());
    if (end < 0)
      return -ERANGE;
  }

  if (ofs_str.length()) {
    ofs = atoll(ofs_str.c_str());
  } else { // RFC 2616 suffix-byte-range-spec
    ofs = -end;
    end = -1;
  }

  if (end >= 0 && end < ofs)
    return -ERANGE;

  return 0;
}
134 | ||
135 | static int decode_policy(CephContext *cct, | |
136 | bufferlist& bl, | |
137 | RGWAccessControlPolicy *policy) | |
138 | { | |
139 | bufferlist::iterator iter = bl.begin(); | |
140 | try { | |
141 | policy->decode(iter); | |
142 | } catch (buffer::error& err) { | |
143 | ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; | |
144 | return -EIO; | |
145 | } | |
146 | if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) { | |
147 | RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy); | |
148 | ldout(cct, 15) << __func__ << " Read AccessControlPolicy"; | |
149 | s3policy->to_xml(*_dout); | |
150 | *_dout << dendl; | |
151 | } | |
152 | return 0; | |
153 | } | |
154 | ||
155 | ||
156 | static int get_user_policy_from_attr(CephContext * const cct, | |
157 | RGWRados * const store, | |
158 | map<string, bufferlist>& attrs, | |
159 | RGWAccessControlPolicy& policy /* out */) | |
160 | { | |
161 | auto aiter = attrs.find(RGW_ATTR_ACL); | |
162 | if (aiter != attrs.end()) { | |
163 | int ret = decode_policy(cct, aiter->second, &policy); | |
164 | if (ret < 0) { | |
165 | return ret; | |
166 | } | |
167 | } else { | |
168 | return -ENOENT; | |
169 | } | |
170 | ||
171 | return 0; | |
172 | } | |
173 | ||
174 | static int get_bucket_instance_policy_from_attr(CephContext *cct, | |
175 | RGWRados *store, | |
176 | RGWBucketInfo& bucket_info, | |
177 | map<string, bufferlist>& bucket_attrs, | |
178 | RGWAccessControlPolicy *policy, | |
179 | rgw_raw_obj& obj) | |
180 | { | |
181 | map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL); | |
182 | ||
183 | if (aiter != bucket_attrs.end()) { | |
184 | int ret = decode_policy(cct, aiter->second, policy); | |
185 | if (ret < 0) | |
186 | return ret; | |
187 | } else { | |
188 | ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl; | |
189 | RGWUserInfo uinfo; | |
190 | /* object exists, but policy is broken */ | |
191 | int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo); | |
192 | if (r < 0) | |
193 | return r; | |
194 | ||
195 | policy->create_default(bucket_info.owner, uinfo.display_name); | |
196 | } | |
197 | return 0; | |
198 | } | |
199 | ||
/*
 * Read an object's ACL from its RGW_ATTR_ACL xattr into *policy.
 *
 * Returns 0 on success. If the object exists but carries no ACL xattr
 * (read returns -ENODATA), a default policy owned by the bucket owner is
 * synthesized and the user-lookup result (0 on success) is returned.
 * Any other read error — including -ENOENT for a missing object — is
 * propagated unchanged so callers can distinguish those cases.
 */
static int get_obj_policy_from_attr(CephContext *cct,
				    RGWRados *store,
				    RGWObjectCtx& obj_ctx,
				    RGWBucketInfo& bucket_info,
				    map<string, bufferlist>& bucket_attrs,
				    RGWAccessControlPolicy *policy,
				    rgw_obj& obj)
{
  bufferlist bl;
  int ret = 0;

  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
  RGWRados::Object::Read rop(&op_target);

  ret = rop.get_attr(RGW_ATTR_ACL, bl);
  if (ret >= 0) {
    ret = decode_policy(cct, bl, policy);
    if (ret < 0)
      return ret;
  } else if (ret == -ENODATA) {
    /* object exists, but policy is broken */
    ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
    RGWUserInfo uinfo;
    ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
    if (ret < 0)
      return ret;

    policy->create_default(bucket_info.owner, uinfo.display_name);
  }
  return ret;
}
231 | ||
232 | ||
233 | /** | |
234 | * Get the AccessControlPolicy for an object off of disk. | |
235 | * policy: must point to a valid RGWACL, and will be filled upon return. | |
236 | * bucket: name of the bucket containing the object. | |
237 | * object: name of the object to get the ACL for. | |
238 | * Returns: 0 on success, -ERR# otherwise. | |
239 | */ | |
240 | static int get_bucket_policy_from_attr(CephContext *cct, | |
241 | RGWRados *store, | |
242 | RGWBucketInfo& bucket_info, | |
243 | map<string, bufferlist>& bucket_attrs, | |
244 | RGWAccessControlPolicy *policy) | |
245 | { | |
246 | rgw_raw_obj instance_obj; | |
247 | store->get_bucket_instance_obj(bucket_info.bucket, instance_obj); | |
248 | return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs, | |
249 | policy, instance_obj); | |
250 | } | |
251 | ||
252 | static optional<Policy> get_iam_policy_from_attr(CephContext* cct, | |
253 | RGWRados* store, | |
254 | map<string, bufferlist>& attrs, | |
255 | const string& tenant) { | |
256 | auto i = attrs.find(RGW_ATTR_IAM_POLICY); | |
257 | if (i != attrs.end()) { | |
258 | return Policy(cct, tenant, i->second); | |
259 | } else { | |
260 | return none; | |
261 | } | |
262 | } | |
263 | ||
264 | static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs) | |
265 | { | |
266 | RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj); | |
267 | RGWRados::Object::Read read_op(&op_target); | |
268 | ||
269 | read_op.params.attrs = &attrs; | |
270 | ||
271 | return read_op.prepare(); | |
272 | } | |
273 | ||
274 | static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val) | |
275 | { | |
276 | map<string, bufferlist> attrs; | |
277 | RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj); | |
278 | RGWRados::Object::Read read_op(&op_target); | |
279 | ||
280 | read_op.params.attrs = &attrs; | |
281 | ||
282 | int r = read_op.prepare(); | |
283 | if (r < 0) { | |
284 | return r; | |
285 | } | |
286 | store->set_atomic(s->obj_ctx, read_op.state.obj); | |
287 | attrs[attr_name] = attr_val; | |
288 | return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL); | |
289 | } | |
290 | ||
291 | static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs, | |
292 | uint64_t *obj_size, RGWObjVersionTracker *objv_tracker) | |
293 | { | |
294 | RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj); | |
295 | RGWRados::SystemObject::Read rop(&src); | |
296 | ||
297 | rop.stat_params.attrs = &attrs; | |
298 | rop.stat_params.obj_size = obj_size; | |
299 | ||
300 | int ret = rop.stat(objv_tracker); | |
301 | return ret; | |
302 | } | |
303 | ||
304 | static int read_bucket_policy(RGWRados *store, | |
305 | struct req_state *s, | |
306 | RGWBucketInfo& bucket_info, | |
307 | map<string, bufferlist>& bucket_attrs, | |
308 | RGWAccessControlPolicy *policy, | |
309 | rgw_bucket& bucket) | |
310 | { | |
311 | if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) { | |
312 | ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl; | |
313 | return -ERR_USER_SUSPENDED; | |
314 | } | |
315 | ||
316 | if (bucket.name.empty()) { | |
317 | return 0; | |
318 | } | |
319 | ||
320 | int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy); | |
321 | if (ret == -ENOENT) { | |
322 | ret = -ERR_NO_SUCH_BUCKET; | |
323 | } | |
324 | ||
325 | return ret; | |
326 | } | |
327 | ||
/*
 * Load the object ACL into @acl and the bucket's IAM policy into @policy.
 *
 * When the request carries an "uploadId" argument (multipart upload), the
 * ACL is read from the upload's meta object in the multipart namespace
 * rather than from the object itself.
 *
 * If the object does not exist, the bucket ACL is consulted so a proper
 * error code can be chosen: -ENOENT when the requester could have read
 * the object, -EACCES when they must not even learn of its absence.
 */
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload: the ACL lives on the upload's meta object */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }

    /* bucket owner, admins, and readers of the bucket may learn the
     * object is missing; everyone else gets EACCES */
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner) &&
        ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                          RGW_PERM_READ)) {
      ret = -EACCES;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
382 | ||
/**
 * Get the AccessControlPolicy for the requesting user and the bucket
 * off of disk, and load the bucket's IAM policy into s->iam_policy.
 * s: The req_state to draw information from.
 * Returns: 0 on success, -ERR# otherwise.
 */
389 | int rgw_build_bucket_policies(RGWRados* store, struct req_state* s) | |
390 | { | |
391 | int ret = 0; | |
392 | rgw_obj_key obj; | |
393 | RGWUserInfo bucket_owner_info; | |
394 | RGWObjectCtx obj_ctx(store); | |
395 | ||
396 | string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance"); | |
397 | if (!bi.empty()) { | |
398 | ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id); | |
399 | if (ret < 0) { | |
400 | return ret; | |
401 | } | |
402 | } | |
403 | ||
404 | if(s->dialect.compare("s3") == 0) { | |
405 | s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_S3>(s->cct); | |
406 | } else if(s->dialect.compare("swift") == 0) { | |
407 | /* We aren't allocating the account policy for those operations using | |
408 | * the Swift's infrastructure that don't really need req_state::user. | |
409 | * Typical example here is the implementation of /info. */ | |
410 | if (!s->user->user_id.empty()) { | |
411 | s->user_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct); | |
412 | } | |
413 | s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct); | |
414 | } else { | |
415 | s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct); | |
416 | } | |
417 | ||
418 | /* check if copy source is within the current domain */ | |
419 | if (!s->src_bucket_name.empty()) { | |
420 | RGWBucketInfo source_info; | |
421 | ||
422 | if (s->bucket_instance_id.empty()) { | |
423 | ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL); | |
424 | } else { | |
425 | ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL); | |
426 | } | |
427 | if (ret == 0) { | |
428 | string& zonegroup = source_info.zonegroup; | |
429 | s->local_source = store->get_zonegroup().equals(zonegroup); | |
430 | } | |
431 | } | |
432 | ||
433 | struct { | |
434 | rgw_user uid; | |
435 | std::string display_name; | |
436 | } acct_acl_user = { | |
437 | s->user->user_id, | |
438 | s->user->display_name, | |
439 | }; | |
440 | ||
441 | if (!s->bucket_name.empty()) { | |
442 | s->bucket_exists = true; | |
443 | if (s->bucket_instance_id.empty()) { | |
444 | ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs); | |
445 | } else { | |
446 | ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs); | |
447 | } | |
448 | if (ret < 0) { | |
449 | if (ret != -ENOENT) { | |
450 | string bucket_log; | |
451 | rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log); | |
452 | ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl; | |
453 | return ret; | |
454 | } | |
455 | s->bucket_exists = false; | |
456 | } | |
457 | s->bucket = s->bucket_info.bucket; | |
458 | ||
459 | if (s->bucket_exists) { | |
460 | ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs, | |
461 | s->bucket_acl.get(), s->bucket); | |
462 | acct_acl_user = { | |
463 | s->bucket_info.owner, | |
464 | s->bucket_acl->get_owner().get_display_name(), | |
465 | }; | |
466 | } else { | |
467 | s->bucket_acl->create_default(s->user->user_id, s->user->display_name); | |
468 | ret = -ERR_NO_SUCH_BUCKET; | |
469 | } | |
470 | ||
471 | s->bucket_owner = s->bucket_acl->get_owner(); | |
472 | ||
473 | RGWZoneGroup zonegroup; | |
474 | int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup); | |
475 | if (!r) { | |
476 | if (!zonegroup.endpoints.empty()) { | |
477 | s->zonegroup_endpoint = zonegroup.endpoints.front(); | |
478 | } else { | |
479 | // use zonegroup's master zone endpoints | |
480 | auto z = zonegroup.zones.find(zonegroup.master_zone); | |
481 | if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) { | |
482 | s->zonegroup_endpoint = z->second.endpoints.front(); | |
483 | } | |
484 | } | |
485 | s->zonegroup_name = zonegroup.get_name(); | |
486 | } | |
487 | if (r < 0 && ret == 0) { | |
488 | ret = r; | |
489 | } | |
490 | ||
491 | if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) { | |
492 | ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl; | |
493 | /* we now need to make sure that the operation actually requires copy source, that is | |
494 | * it's a copy operation | |
495 | */ | |
496 | if (store->get_zonegroup().is_master_zonegroup() && s->system_request) { | |
497 | /*If this is the master, don't redirect*/ | |
498 | } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) { | |
499 | /* If op is get bucket location, don't redirect */ | |
500 | } else if (!s->local_source || | |
501 | (s->op != OP_PUT && s->op != OP_COPY) || | |
502 | s->object.empty()) { | |
503 | return -ERR_PERMANENT_REDIRECT; | |
504 | } | |
505 | } | |
506 | } | |
507 | ||
508 | /* handle user ACL only for those APIs which support it */ | |
509 | if (s->user_acl) { | |
510 | map<string, bufferlist> uattrs; | |
511 | ||
512 | ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs); | |
513 | if (!ret) { | |
514 | ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl); | |
515 | } | |
516 | if (-ENOENT == ret) { | |
517 | /* In already existing clusters users won't have ACL. In such case | |
518 | * assuming that only account owner has the rights seems to be | |
519 | * reasonable. That allows to have only one verification logic. | |
520 | * NOTE: there is small compatibility kludge for global, empty tenant: | |
521 | * 1. if we try to reach an existing bucket, its owner is considered | |
522 | * as account owner. | |
523 | * 2. otherwise account owner is identity stored in s->user->user_id. */ | |
524 | s->user_acl->create_default(acct_acl_user.uid, | |
525 | acct_acl_user.display_name); | |
526 | ret = 0; | |
527 | } else { | |
528 | ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id=" | |
529 | << s->user->user_id | |
530 | << ", ret=" | |
531 | << ret | |
532 | << ")" << dendl; | |
533 | return ret; | |
534 | } | |
535 | } | |
536 | ||
537 | try { | |
538 | s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs, | |
539 | s->bucket_tenant); | |
540 | } catch (const std::exception& e) { | |
541 | // Really this is a can't happen condition. We parse the policy | |
542 | // when it's given to us, so perhaps we should abort or otherwise | |
543 | // raise bloody murder. | |
544 | lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl; | |
545 | ret = -EACCES; | |
546 | } | |
547 | ||
548 | return ret; | |
549 | } | |
550 | ||
/**
 * Get the AccessControlPolicy for an object off of disk.
 * s: The req_state to draw information from.
 * prefetch_data: if true, hint the backend to start reading the object's
 *                data alongside its metadata.
 * Returns: 0 on success, -ERR# otherwise.
 */
557 | int rgw_build_object_policies(RGWRados *store, struct req_state *s, | |
558 | bool prefetch_data) | |
559 | { | |
560 | int ret = 0; | |
561 | ||
562 | if (!s->object.empty()) { | |
563 | if (!s->bucket_exists) { | |
564 | return -ERR_NO_SUCH_BUCKET; | |
565 | } | |
566 | s->object_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct); | |
567 | ||
568 | rgw_obj obj(s->bucket, s->object); | |
569 | ||
570 | store->set_atomic(s->obj_ctx, obj); | |
571 | if (prefetch_data) { | |
572 | store->set_prefetch_data(s->obj_ctx, obj); | |
573 | } | |
574 | ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs, | |
575 | s->object_acl.get(), s->iam_policy, s->bucket, | |
576 | s->object); | |
577 | } | |
578 | ||
579 | return ret; | |
580 | } | |
581 | ||
582 | rgw::IAM::Environment rgw_build_iam_environment(RGWRados* store, | |
583 | struct req_state* s) | |
584 | { | |
585 | rgw::IAM::Environment e; | |
586 | const auto& m = s->info.env->get_map(); | |
587 | auto t = ceph::real_clock::now(); | |
588 | e.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t))); | |
589 | e.emplace("aws:EpochTime", ceph::to_iso_8601(t)); | |
590 | // TODO: This is fine for now, but once we have STS we'll need to | |
591 | // look and see. Also this won't work with the IdentityApplier | |
592 | // model, since we need to know the actual credential. | |
593 | e.emplace("aws:PrincipalType", "User"); | |
594 | ||
595 | auto i = m.find("HTTP_REFERER"); | |
596 | if (i != m.end()) { | |
597 | e.emplace("aws:Referer", i->second); | |
598 | } | |
599 | ||
600 | // These seem to be the semantics, judging from rest_rgw_s3.cc | |
601 | i = m.find("SERVER_PORT_SECURE"); | |
602 | if (i != m.end()) { | |
603 | e.emplace("aws:SecureTransport", "true"); | |
604 | } | |
605 | ||
606 | const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param; | |
607 | if (remote_addr_param.length()) { | |
608 | i = m.find(remote_addr_param); | |
609 | } else { | |
610 | i = m.find("REMOTE_ADDR"); | |
611 | } | |
612 | if (i != m.end()) { | |
613 | const string* ip = &(i->second); | |
614 | string temp; | |
615 | if (remote_addr_param == "HTTP_X_FORWARDED_FOR") { | |
616 | const auto comma = ip->find(','); | |
617 | if (comma != string::npos) { | |
618 | temp.assign(*ip, 0, comma); | |
619 | ip = &temp; | |
620 | } | |
621 | } | |
622 | e.emplace("aws:SourceIp", *ip); | |
623 | } | |
624 | ||
625 | i = m.find("HTTP_USER_AGENT"); { | |
626 | if (i != m.end()) | |
627 | e.emplace("aws:UserAgent", i->second); | |
628 | } | |
629 | ||
630 | if (s->user) { | |
631 | // What to do about aws::userid? One can have multiple access | |
632 | // keys so that isn't really suitable. Do we have a durable | |
633 | // identifier that can persist through name changes? | |
634 | e.emplace("aws:username", s->user->user_id.id); | |
635 | } | |
636 | return e; | |
637 | } | |
638 | ||
639 | void rgw_bucket_object_pre_exec(struct req_state *s) | |
640 | { | |
641 | if (s->expect_cont) | |
642 | dump_continue(s); | |
643 | ||
644 | dump_bucket_from_state(s); | |
645 | } | |
646 | ||
647 | // So! Now and then when we try to update bucket information, the | |
648 | // bucket has changed during the course of the operation. (Or we have | |
649 | // a cache consistency problem that Watch/Notify isn't ruling out | |
650 | // completely.) | |
651 | // | |
652 | // When this happens, we need to update the bucket info and try | |
653 | // again. We have, however, to try the right *part* again. We can't | |
654 | // simply re-send, since that will obliterate the previous update. | |
655 | // | |
656 | // Thus, callers of this function should include everything that | |
657 | // merges information to be changed into the bucket information as | |
658 | // well as the call to set it. | |
659 | // | |
660 | // The called function must return an integer, negative on error. In | |
661 | // general, they should just return op_ret. | |
662 | namespace { | |
663 | template<typename F> | |
664 | int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) { | |
665 | auto r = f(); | |
666 | for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) { | |
667 | r = g->try_refresh_bucket_info(s->bucket_info, nullptr, | |
668 | &s->bucket_attrs); | |
669 | if (r >= 0) { | |
670 | r = f(); | |
671 | } | |
672 | } | |
673 | return r; | |
674 | } | |
675 | } | |
676 | ||
677 | ||
678 | int RGWGetObj::verify_permission() | |
679 | { | |
680 | obj = rgw_obj(s->bucket, s->object); | |
681 | store->set_atomic(s->obj_ctx, obj); | |
682 | if (get_data) { | |
683 | store->set_prefetch_data(s->obj_ctx, obj); | |
684 | } | |
685 | ||
686 | if (torrent.get_flag()) { | |
687 | if (obj.key.instance.empty()) { | |
688 | action = rgw::IAM::s3GetObjectTorrent; | |
689 | } else { | |
690 | action = rgw::IAM::s3GetObjectVersionTorrent; | |
691 | } | |
692 | } else { | |
693 | if (obj.key.instance.empty()) { | |
694 | action = rgw::IAM::s3GetObject; | |
695 | } else { | |
696 | action = rgw::IAM::s3GetObjectVersion; | |
697 | } | |
698 | } | |
699 | ||
700 | if (!verify_object_permission(s, action)) { | |
701 | return -EACCES; | |
702 | } | |
703 | ||
704 | return 0; | |
705 | } | |
706 | ||
707 | ||
708 | int RGWOp::verify_op_mask() | |
709 | { | |
710 | uint32_t required_mask = op_mask(); | |
711 | ||
712 | ldout(s->cct, 20) << "required_mask= " << required_mask | |
713 | << " user.op_mask=" << s->user->op_mask << dendl; | |
714 | ||
715 | if ((s->user->op_mask & required_mask) != required_mask) { | |
716 | return -EPERM; | |
717 | } | |
718 | ||
719 | if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) { | |
720 | ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl; | |
721 | return -EPERM; | |
722 | } | |
723 | ||
724 | return 0; | |
725 | } | |
726 | ||
727 | int RGWGetObjTags::verify_permission() | |
728 | { | |
729 | if (!verify_object_permission(s, | |
730 | s->object.instance.empty() ? | |
731 | rgw::IAM::s3GetObjectTagging: | |
732 | rgw::IAM::s3GetObjectVersionTagging)) | |
733 | return -EACCES; | |
734 | ||
735 | return 0; | |
736 | } | |
737 | ||
738 | void RGWGetObjTags::pre_exec() | |
739 | { | |
740 | rgw_bucket_object_pre_exec(s); | |
741 | } | |
742 | ||
743 | void RGWGetObjTags::execute() | |
744 | { | |
745 | rgw_obj obj; | |
746 | map<string,bufferlist> attrs; | |
747 | ||
748 | obj = rgw_obj(s->bucket, s->object); | |
749 | ||
750 | store->set_atomic(s->obj_ctx, obj); | |
751 | ||
752 | op_ret = get_obj_attrs(store, s, obj, attrs); | |
753 | if (op_ret < 0) { | |
754 | ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj | |
755 | << " ret=" << op_ret << dendl; | |
756 | return; | |
757 | } | |
758 | ||
759 | auto tags = attrs.find(RGW_ATTR_TAGS); | |
760 | if(tags != attrs.end()){ | |
761 | has_tags = true; | |
762 | tags_bl.append(tags->second); | |
763 | } | |
764 | send_response_data(tags_bl); | |
765 | } | |
766 | ||
767 | int RGWPutObjTags::verify_permission() | |
768 | { | |
769 | if (!verify_object_permission(s, | |
770 | s->object.instance.empty() ? | |
771 | rgw::IAM::s3PutObjectTagging: | |
772 | rgw::IAM::s3PutObjectVersionTagging)) | |
773 | return -EACCES; | |
774 | return 0; | |
775 | } | |
776 | ||
777 | void RGWPutObjTags::execute() | |
778 | { | |
779 | op_ret = get_params(); | |
780 | if (op_ret < 0) | |
781 | return; | |
782 | ||
783 | if (s->object.empty()){ | |
784 | op_ret= -EINVAL; // we only support tagging on existing objects | |
785 | return; | |
786 | } | |
787 | ||
788 | rgw_obj obj; | |
789 | obj = rgw_obj(s->bucket, s->object); | |
790 | store->set_atomic(s->obj_ctx, obj); | |
791 | op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl); | |
792 | if (op_ret == -ECANCELED){ | |
793 | op_ret = -ERR_TAG_CONFLICT; | |
794 | } | |
795 | } | |
796 | ||
797 | void RGWDeleteObjTags::pre_exec() | |
798 | { | |
799 | rgw_bucket_object_pre_exec(s); | |
800 | } | |
801 | ||
802 | ||
803 | int RGWDeleteObjTags::verify_permission() | |
804 | { | |
805 | if (!s->object.empty()) { | |
806 | if (!verify_object_permission(s, | |
807 | s->object.instance.empty() ? | |
808 | rgw::IAM::s3DeleteObjectTagging: | |
809 | rgw::IAM::s3DeleteObjectVersionTagging)) | |
810 | return -EACCES; | |
811 | } | |
812 | return 0; | |
813 | } | |
814 | ||
815 | void RGWDeleteObjTags::execute() | |
816 | { | |
817 | if (s->object.empty()) | |
818 | return; | |
819 | ||
820 | rgw_obj obj; | |
821 | obj = rgw_obj(s->bucket, s->object); | |
822 | store->set_atomic(s->obj_ctx, obj); | |
823 | map <string, bufferlist> attrs; | |
824 | map <string, bufferlist> rmattr; | |
825 | bufferlist bl; | |
826 | rmattr[RGW_ATTR_TAGS] = bl; | |
827 | op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr); | |
828 | } | |
829 | ||
830 | int RGWOp::do_aws4_auth_completion() | |
831 | { | |
832 | ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl; | |
833 | if (s->auth.completer) { | |
834 | if (!s->auth.completer->complete()) { | |
835 | return -ERR_AMZ_CONTENT_SHA256_MISMATCH; | |
836 | } else { | |
837 | dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl; | |
838 | } | |
839 | ||
840 | /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first | |
841 | * call passes, so we disable second one. This is old behaviour, sorry! | |
842 | * Plan for tomorrow: seek and destroy. */ | |
843 | s->auth.completer = nullptr; | |
844 | } | |
845 | ||
846 | return 0; | |
847 | } | |
848 | ||
/*
 * Populate bucket_quota and user_quota for this op.
 *
 * Quota is skipped entirely for system requests, for users without any
 * modify capability, and for non-object operations. The effective quotas
 * belong to the bucket owner (or to the requester when they own the
 * bucket). Bucket quota precedence: per-bucket setting, then the owner's
 * per-user default, then the zone-wide default; user quota precedence:
 * the owner's setting, then the zone-wide default.
 */
int RGWOp::init_quota()
{
  /* no quota enforcement for system requests */
  if (s->system_request)
    return 0;

  /* init quota related stuff */
  if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
    return 0;
  }

  /* only interested in object related ops */
  if (s->object.empty()) {
    return 0;
  }

  RGWUserInfo owner_info;
  RGWUserInfo *uinfo;

  /* quota applies to the bucket owner's account, not the requester's */
  if (s->user->user_id == s->bucket_owner.get_id()) {
    uinfo = s->user;
  } else {
    int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
    if (r < 0)
      return r;
    uinfo = &owner_info;
  }

  if (s->bucket_info.quota.enabled) {
    bucket_quota = s->bucket_info.quota;
  } else if (uinfo->bucket_quota.enabled) {
    bucket_quota = uinfo->bucket_quota;
  } else {
    bucket_quota = store->get_bucket_quota();
  }

  if (uinfo->user_quota.enabled) {
    user_quota = uinfo->user_quota;
  } else {
    user_quota = store->get_user_quota();
  }

  return 0;
}
893 | ||
894 | static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) { | |
895 | uint8_t flags = 0; | |
896 | ||
897 | if (!req_meth) { | |
898 | dout(5) << "req_meth is null" << dendl; | |
899 | return false; | |
900 | } | |
901 | ||
902 | if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET; | |
903 | else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST; | |
904 | else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT; | |
905 | else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE; | |
906 | else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD; | |
907 | ||
908 | if ((rule->get_allowed_methods() & flags) == flags) { | |
909 | dout(10) << "Method " << req_meth << " is supported" << dendl; | |
910 | } else { | |
911 | dout(5) << "Method " << req_meth << " is not supported" << dendl; | |
912 | return false; | |
913 | } | |
914 | ||
915 | return true; | |
916 | } | |
917 | ||
918 | static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) { | |
919 | if (req_hdrs) { | |
920 | vector<string> hdrs; | |
921 | get_str_vec(req_hdrs, hdrs); | |
922 | for (const auto& hdr : hdrs) { | |
923 | if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) { | |
924 | dout(5) << "Header " << hdr << " is not registered in this rule" << dendl; | |
925 | return false; | |
926 | } | |
927 | } | |
928 | } | |
929 | return true; | |
930 | } | |
931 | ||
/* Load the bucket's CORS configuration from the RGW_ATTR_CORS xattr
 * into this->bucket_cors, setting this->cors_exist accordingly.
 * Returns 0 on success (including "no CORS configured") or -EIO when
 * the stored configuration cannot be decoded. */
int RGWOp::read_bucket_cors()
{
  bufferlist bl;

  map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
  if (aiter == s->bucket_attrs.end()) {
    ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
    cors_exist = false;
    return 0; /* no CORS configuration found */
  }

  cors_exist = true;

  bl = aiter->second;

  bufferlist::iterator iter = bl.begin();
  try {
    bucket_cors.decode(iter);
  } catch (buffer::error& err) {
    ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
    return -EIO;
  }
  /* At debug level >= 15 also dump the decoded configuration as XML;
   * the should_gather() check avoids the formatting cost otherwise. */
  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
    ldout(s->cct, 15) << "Read RGWCORSConfiguration";
    s3cors->to_xml(*_dout);
    *_dout << dendl;
  }
  return 0;
}
962 | ||
963 | /** CORS 6.2.6. | |
964 | * If any of the header field-names is not a ASCII case-insensitive match for | |
965 | * any of the values in list of headers do not set any additional headers and | |
966 | * terminate this set of steps. | |
967 | * */ | |
968 | static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) { | |
969 | if (req_hdrs) { | |
970 | list<string> hl; | |
971 | get_str_list(req_hdrs, hl); | |
972 | for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) { | |
973 | if (!rule->is_header_allowed((*it).c_str(), (*it).length())) { | |
974 | dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl; | |
975 | } else { | |
976 | if (hdrs.length() > 0) hdrs.append(","); | |
977 | hdrs.append((*it)); | |
978 | } | |
979 | } | |
980 | } | |
981 | rule->format_exp_headers(exp_hdrs); | |
982 | *max_age = rule->get_max_age(); | |
983 | } | |
984 | ||
/**
 * Generate the CORS header response
 *
 * This is described in the CORS standard, section 6.2.
 *
 * Fills origin/method/headers/exp_headers/max_age from the bucket's
 * CORS configuration.  Returns true when CORS response headers should
 * be emitted, false when this is not a CORS request, no configuration
 * exists, or no rule matches.
 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to an asterisk if this is allowed in the
   * rule and no Authorization was sent by the client.
   *
   * The origin parameter specifies a URI that may access the resource.  The
   * browser must enforce this.  For requests without credentials, the server
   * may specify "*" as a wildcard, thereby allowing any origin to access the
   * resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    /* no preflight header: fall back to the actual request method
     * (i.e. a simple CORS request, not an OPTIONS preflight) */
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
      return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1049 | ||
1050 | int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket, | |
1051 | const rgw_bucket_dir_entry& ent, | |
1052 | RGWAccessControlPolicy * const bucket_acl, | |
1053 | const optional<Policy>& bucket_policy, | |
1054 | const off_t start_ofs, | |
1055 | const off_t end_ofs) | |
1056 | { | |
1057 | ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl; | |
1058 | RGWGetObj_CB cb(this); | |
1059 | RGWGetDataCB* filter = &cb; | |
1060 | boost::optional<RGWGetObj_Decompress> decompress; | |
1061 | ||
1062 | int64_t cur_ofs = start_ofs; | |
1063 | int64_t cur_end = end_ofs; | |
1064 | ||
1065 | rgw_obj part(bucket, ent.key); | |
1066 | ||
1067 | map<string, bufferlist> attrs; | |
1068 | ||
1069 | uint64_t obj_size; | |
1070 | RGWObjectCtx obj_ctx(store); | |
1071 | RGWAccessControlPolicy obj_policy(s->cct); | |
1072 | ||
1073 | ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl; | |
1074 | ||
1075 | obj_ctx.obj.set_atomic(part); | |
1076 | store->set_prefetch_data(&obj_ctx, part); | |
1077 | ||
1078 | RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part); | |
1079 | RGWRados::Object::Read read_op(&op_target); | |
1080 | ||
1081 | read_op.conds.if_match = ent.meta.etag.c_str(); | |
1082 | read_op.params.attrs = &attrs; | |
1083 | read_op.params.obj_size = &obj_size; | |
1084 | ||
1085 | op_ret = read_op.prepare(); | |
1086 | if (op_ret < 0) | |
1087 | return op_ret; | |
1088 | op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end); | |
1089 | if (op_ret < 0) | |
1090 | return op_ret; | |
1091 | bool need_decompress; | |
1092 | op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info); | |
1093 | if (op_ret < 0) { | |
1094 | lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl; | |
1095 | return -EIO; | |
1096 | } | |
1097 | ||
1098 | if (need_decompress) | |
1099 | { | |
1100 | if (cs_info.orig_size != ent.meta.accounted_size) { | |
1101 | // hmm.. something wrong, object not as expected, abort! | |
1102 | ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size << | |
1103 | ", actual read size=" << ent.meta.size << dendl; | |
1104 | return -EIO; | |
1105 | } | |
1106 | decompress.emplace(s->cct, &cs_info, partial_content, filter); | |
1107 | filter = &*decompress; | |
1108 | } | |
1109 | else | |
1110 | { | |
1111 | if (obj_size != ent.meta.size) { | |
1112 | // hmm.. something wrong, object not as expected, abort! | |
1113 | ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl; | |
1114 | return -EIO; | |
1115 | } | |
1116 | } | |
1117 | ||
1118 | op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy); | |
1119 | if (op_ret < 0) | |
1120 | return op_ret; | |
1121 | ||
1122 | /* We can use global user_acl because LOs cannot have segments | |
1123 | * stored inside different accounts. */ | |
1124 | if (s->system_request) { | |
1125 | ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl; | |
1126 | } else if (s->auth.identity->is_admin_of(s->user->user_id)) { | |
1127 | ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl; | |
1128 | } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl, | |
1129 | &obj_policy, bucket_policy, action)) { | |
1130 | return -EPERM; | |
1131 | } | |
1132 | ||
1133 | if (ent.meta.size == 0) { | |
1134 | return 0; | |
1135 | } | |
1136 | ||
1137 | perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs); | |
1138 | filter->fixup_range(cur_ofs, cur_end); | |
1139 | op_ret = read_op.iterate(cur_ofs, cur_end, filter); | |
1140 | if (op_ret >= 0) | |
1141 | op_ret = filter->flush(); | |
1142 | return op_ret; | |
1143 | } | |
1144 | ||
/* Walk the objects under obj_prefix in *pbucket_info (the segments of a
 * DLO / user-manifest object, in bucket-listing order) and, for every
 * segment overlapping the byte range [ofs, end], invoke cb with the
 * clipped per-segment sub-range.  Optionally accumulates:
 *   - *ptotal_len: bytes of the manifest that fall inside [ofs, end],
 *   - *pobj_size:  total size of all segments,
 *   - *pobj_sum:   etag derived (via complete_etag) from the MD5 over
 *                  the concatenated segment etags.
 * Each of ptotal_len, pobj_size, pobj_sum and cb may be null.
 * Returns 0 on success or the first negative error encountered. */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  uint64_t obj_ofs = 0, len_count = 0;
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  do {
#define MAX_LIST_OBJS 100
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      /* bytes accumulated before this segment */
      const uint64_t cur_total_len = obj_ofs;
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;

      /* first segment that reaches past `ofs`: clip its start */
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        etag_sum.Update((const byte *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      /* first segment that reaches past `end` (inclusive, hence +1):
       * clip its end */
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, cb_param);
          if (r < 0) {
            return r;
          }
        }
      }

      /* iteration continues past `end` so pobj_size/pobj_sum cover the
       * whole manifest, but counting/sending stops after the segment
       * containing `end` has been handled */
      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1240 | ||
/* One segment of a Static Large Object (SLO) manifest, flattened for
 * iterate_slo_parts(): the bucket it lives in, its name/size/etag, and
 * the ACL/policy of that bucket.  The pointers are non-owning and must
 * stay valid for as long as the part is used by the caller. */
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr; /* not owned */
  Policy* bucket_policy = nullptr;              /* not owned; may be null */
  rgw_bucket bucket;
  string obj_name;
  uint64_t size = 0;
  string etag;
};
1249 | ||
/* Iterate over the SLO segments that intersect the byte range
 * [ofs, end] and invoke cb for each with the clipped sub-range.
 * slo_parts maps each segment's cumulative starting offset to the
 * segment itself, so upper_bound(ofs) minus one lands on the segment
 * containing `ofs`.  Returns 0 on success or cb's first negative
 * error. */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  /* position on the segment whose range contains `ofs` */
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    /* build a synthetic listing entry so the same callback can serve
     * both the DLO and SLO code paths */
    rgw_bucket_dir_entry ent;

    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    uint64_t cur_total_len = obj_ofs;
    uint64_t start_ofs = 0, end_ofs = ent.meta.size;

    /* clip the first intersecting segment to start at `ofs` */
    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    /* clip the last segment to stop at `end` (inclusive, hence +1) */
    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1322 | ||
1323 | static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket, | |
1324 | const rgw_bucket_dir_entry& ent, | |
1325 | RGWAccessControlPolicy * const bucket_acl, | |
1326 | const optional<Policy>& bucket_policy, | |
1327 | const off_t start_ofs, | |
1328 | const off_t end_ofs, | |
1329 | void * const param) | |
1330 | { | |
1331 | RGWGetObj *op = static_cast<RGWGetObj *>(param); | |
1332 | return op->read_user_manifest_part(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs); | |
1333 | } | |
1334 | ||
/* Handle a GET/HEAD on a DLO (user-manifest) object.  `prefix` has the
 * form "<bucket>/<object-prefix>"; the object's content is the
 * concatenation of all objects under that prefix.  Resolves the segment
 * bucket (which may differ from the request bucket) and its ACL/policy,
 * computes overall size/etag, then streams the requested range segment
 * by segment.  Returns 0 on success or a negative error code. */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
                   << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  optional<Policy> _bucket_policy;
  optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  /* the segments may live in a different bucket than the manifest
   * object itself; fetch that bucket's info/ACL/policy if so */
  if (bucket_name.compare(s->bucket.name) != 0) {
    map<string, bufferlist> bucket_attrs;
    RGWObjectCtx obj_ctx(store);
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldout(s->cct, 0) << "could not get bucket info for bucket="
                       << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  /* clamp the requested range against the now-known total size */
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  /* second pass: compute total_len, the byte count we will transfer */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  if (!get_data) {
    /* HEAD request: emit headers only */
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  /* third pass: actually read and send the data */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  if (!total_len) {
    /* empty body: still emit the response headers */
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1433 | ||
1434 | int RGWGetObj::handle_slo_manifest(bufferlist& bl) | |
1435 | { | |
1436 | RGWSLOInfo slo_info; | |
1437 | bufferlist::iterator bliter = bl.begin(); | |
1438 | try { | |
1439 | ::decode(slo_info, bliter); | |
1440 | } catch (buffer::error& err) { | |
1441 | ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl; | |
1442 | return -EIO; | |
1443 | } | |
1444 | ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl; | |
1445 | ||
1446 | vector<RGWAccessControlPolicy> allocated_acls; | |
1447 | map<string, pair<RGWAccessControlPolicy *, optional<Policy>>> policies; | |
1448 | map<string, rgw_bucket> buckets; | |
1449 | ||
1450 | map<uint64_t, rgw_slo_part> slo_parts; | |
1451 | ||
1452 | MD5 etag_sum; | |
1453 | total_len = 0; | |
1454 | ||
1455 | for (const auto& entry : slo_info.entries) { | |
1456 | const string& path = entry.path; | |
1457 | ||
1458 | /* If the path starts with slashes, strip them all. */ | |
1459 | const size_t pos_init = path.find_first_not_of('/'); | |
1460 | /* According to the documentation of std::string::find following check | |
1461 | * is not necessary as we should get the std::string::npos propagation | |
1462 | * here. This might be true with the accuracy to implementation's bugs. | |
1463 | * See following question on SO: | |
1464 | * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos | |
1465 | */ | |
1466 | if (pos_init == string::npos) { | |
1467 | return -EINVAL; | |
1468 | } | |
1469 | ||
1470 | const size_t pos_sep = path.find('/', pos_init); | |
1471 | if (pos_sep == string::npos) { | |
1472 | return -EINVAL; | |
1473 | } | |
1474 | ||
1475 | string bucket_name = path.substr(pos_init, pos_sep - pos_init); | |
1476 | string obj_name = path.substr(pos_sep + 1); | |
1477 | ||
1478 | rgw_bucket bucket; | |
1479 | RGWAccessControlPolicy *bucket_acl; | |
1480 | Policy* bucket_policy; | |
1481 | ||
1482 | if (bucket_name.compare(s->bucket.name) != 0) { | |
1483 | const auto& piter = policies.find(bucket_name); | |
1484 | if (piter != policies.end()) { | |
1485 | bucket_acl = piter->second.first; | |
1486 | bucket_policy = piter->second.second.get_ptr(); | |
1487 | bucket = buckets[bucket_name]; | |
1488 | } else { | |
1489 | allocated_acls.push_back(RGWAccessControlPolicy(s->cct)); | |
1490 | RGWAccessControlPolicy& _bucket_acl = allocated_acls.back(); | |
1491 | ||
1492 | RGWBucketInfo bucket_info; | |
1493 | map<string, bufferlist> bucket_attrs; | |
1494 | RGWObjectCtx obj_ctx(store); | |
1495 | int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant, | |
1496 | bucket_name, bucket_info, nullptr, | |
1497 | &bucket_attrs); | |
1498 | if (r < 0) { | |
1499 | ldout(s->cct, 0) << "could not get bucket info for bucket=" | |
1500 | << bucket_name << dendl; | |
1501 | return r; | |
1502 | } | |
1503 | bucket = bucket_info.bucket; | |
1504 | bucket_acl = &_bucket_acl; | |
1505 | r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, | |
1506 | bucket); | |
1507 | if (r < 0) { | |
1508 | ldout(s->cct, 0) << "failed to read bucket ACL for bucket " | |
1509 | << bucket << dendl; | |
1510 | return r; | |
1511 | } | |
1512 | auto _bucket_policy = get_iam_policy_from_attr( | |
1513 | s->cct, store, bucket_attrs, bucket_info.bucket.tenant); | |
1514 | bucket_policy = _bucket_policy.get_ptr(); | |
1515 | buckets[bucket_name] = bucket; | |
1516 | policies[bucket_name] = make_pair(bucket_acl, _bucket_policy); | |
1517 | } | |
1518 | } else { | |
1519 | bucket = s->bucket; | |
1520 | bucket_acl = s->bucket_acl.get(); | |
1521 | bucket_policy = s->iam_policy.get_ptr(); | |
1522 | } | |
1523 | ||
1524 | rgw_slo_part part; | |
1525 | part.bucket_acl = bucket_acl; | |
1526 | part.bucket_policy = bucket_policy; | |
1527 | part.bucket = bucket; | |
1528 | part.obj_name = obj_name; | |
1529 | part.size = entry.size_bytes; | |
1530 | part.etag = entry.etag; | |
1531 | ldout(s->cct, 20) << "slo_part: ofs=" << ofs | |
1532 | << " bucket=" << part.bucket | |
1533 | << " obj=" << part.obj_name | |
1534 | << " size=" << part.size | |
1535 | << " etag=" << part.etag | |
1536 | << dendl; | |
1537 | ||
1538 | etag_sum.Update((const byte *)entry.etag.c_str(), | |
1539 | entry.etag.length()); | |
1540 | ||
1541 | slo_parts[total_len] = part; | |
1542 | total_len += part.size; | |
1543 | } | |
1544 | ||
1545 | complete_etag(etag_sum, &lo_etag); | |
1546 | ||
1547 | s->obj_size = slo_info.total_size; | |
1548 | ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl; | |
1549 | ||
1550 | int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end); | |
1551 | if (r < 0) { | |
1552 | return r; | |
1553 | } | |
1554 | ||
1555 | total_len = end - ofs + 1; | |
1556 | ||
1557 | r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts, | |
1558 | get_obj_user_manifest_iterate_cb, (void *)this); | |
1559 | if (r < 0) { | |
1560 | return r; | |
1561 | } | |
1562 | ||
1563 | return 0; | |
1564 | } | |
1565 | ||
1566 | int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) | |
1567 | { | |
1568 | /* garbage collection related handling */ | |
1569 | utime_t start_time = ceph_clock_now(); | |
1570 | if (start_time > gc_invalidate_time) { | |
1571 | int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj); | |
1572 | if (r < 0) { | |
1573 | dout(0) << "WARNING: could not defer gc entry for obj" << dendl; | |
1574 | } | |
1575 | gc_invalidate_time = start_time; | |
1576 | gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2); | |
1577 | } | |
1578 | return send_response_data(bl, bl_ofs, bl_len); | |
1579 | } | |
1580 | ||
1581 | bool RGWGetObj::prefetch_data() | |
1582 | { | |
1583 | /* HEAD request, stop prefetch*/ | |
1584 | if (!get_data) { | |
1585 | return false; | |
1586 | } | |
1587 | ||
1588 | bool prefetch_first_chunk = true; | |
1589 | range_str = s->info.env->get("HTTP_RANGE"); | |
1590 | ||
1591 | if(range_str) { | |
1592 | int r = parse_range(range_str, ofs, end, &partial_content); | |
1593 | /* error on parsing the range, stop prefetch and will fail in execte() */ | |
1594 | if (r < 0) { | |
1595 | range_parsed = false; | |
1596 | return false; | |
1597 | } else { | |
1598 | range_parsed = true; | |
1599 | } | |
1600 | /* range get goes to shadown objects, stop prefetch */ | |
1601 | if (ofs >= s->cct->_conf->rgw_max_chunk_size) { | |
1602 | prefetch_first_chunk = false; | |
1603 | } | |
1604 | } | |
1605 | ||
1606 | return get_data && prefetch_first_chunk; | |
1607 | } | |
/* Pre-execution hook: delegate to the common bucket/object setup. */
void RGWGetObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1612 | ||
1613 | static bool object_is_expired(map<string, bufferlist>& attrs) { | |
1614 | map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT); | |
1615 | if (iter != attrs.end()) { | |
1616 | utime_t delete_at; | |
1617 | try { | |
1618 | ::decode(delete_at, iter->second); | |
1619 | } catch (buffer::error& err) { | |
1620 | dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl; | |
1621 | return false; | |
1622 | } | |
1623 | ||
1624 | if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) { | |
1625 | return true; | |
1626 | } | |
1627 | } | |
1628 | ||
1629 | return false; | |
1630 | } | |
1631 | ||
/* Main GET/HEAD object entry point.  Order matters here: params and
 * conditionals are parsed first, the read op is prepared, then special
 * cases (torrent, compression, DLO/SLO manifests, zero-size ranges,
 * object expiration, decryption) are peeled off before the plain data
 * path streams the range through the filter chain.  All failures funnel
 * through done_err so an error response is always produced. */
void RGWGetObj::execute()
{
  utime_t start_time = s->time;
  bufferlist bl;
  /* first GC-defer refresh deadline; get_data_cb() re-arms it */
  gc_invalidate_time = ceph_clock_now();
  gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);

  bool need_decompress;
  int64_t ofs_x, end_x;

  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
  boost::optional<RGWGetObj_Decompress> decompress;
  std::unique_ptr<RGWGetDataCB> decrypt;
  map<string, bufferlist>::iterator attr_iter;

  perfcounter->inc(l_rgw_get);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  op_ret = get_params();
  if (op_ret < 0)
    goto done_err;

  op_ret = init_common();
  if (op_ret < 0)
    goto done_err;

  /* wire the conditional-GET parameters into the read op */
  read_op.conds.mod_ptr = mod_ptr;
  read_op.conds.unmod_ptr = unmod_ptr;
  read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
  read_op.conds.mod_zone_id = mod_zone_id;
  read_op.conds.mod_pg_ver = mod_pg_ver;
  read_op.conds.if_match = if_match;
  read_op.conds.if_nomatch = if_nomatch;
  read_op.params.attrs = &attrs;
  read_op.params.lastmod = &lastmod;
  read_op.params.obj_size = &s->obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    goto done_err;
  version_id = read_op.state.obj.key.instance;

  /* STAT ops don't need data, and do no i/o */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* start gettorrent */
  if (torrent.get_flag())
  {
    /* torrents of SSE-C encrypted objects cannot be served */
    attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
    if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
      op_ret = -ERR_INVALID_REQUEST;
      goto done_err;
    }
    torrent.init(s, store);
    op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    op_ret = send_response_data(bl, 0, total_len);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }
  /* end gettorrent */

  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    goto done_err;
  }
  if (need_decompress) {
    /* report the uncompressed size to the client and insert the
     * decompression filter ahead of the response callback */
    s->obj_size = cs_info.orig_size;
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }

  /* DLO: delegate to the user-manifest path */
  attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    op_ret = handle_user_manifest(attr_iter->second.c_str());
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
                       << op_ret << dendl;
      goto done_err;
    }
    return;
  }

  /* SLO: delegate to the static-large-object path */
  attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    is_slo = true;
    op_ret = handle_slo_manifest(attr_iter->second);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }

  // for range requests with obj size 0
  if (range_str && !(s->obj_size)) {
    total_len = 0;
    op_ret = -ERANGE;
    goto done_err;
  }

  op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
  if (op_ret < 0)
    goto done_err;
  total_len = (ofs <= end ? end + 1 - ofs : 0);

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(attrs)) {
    op_ret = -ENOENT;
    goto done_err;
  }

  start = ofs;

  /* STAT ops don't need data, and do no i/o */
  /* NOTE(review): STAT already returned right after prepare() above, so
   * this repeated check looks unreachable — confirm before removing. */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  attr_iter = attrs.find(RGW_ATTR_MANIFEST);
  op_ret = this->get_decrypt_filter(&decrypt, filter,
                                    attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
  if (decrypt != nullptr) {
    filter = decrypt.get();
  }
  if (op_ret < 0) {
    goto done_err;
  }

  if (!get_data || ofs > end) {
    /* HEAD request or empty effective range: headers only */
    send_response_data(bl, 0, 0);
    return;
  }

  perfcounter->inc(l_rgw_get_b, end - ofs);

  ofs_x = ofs;
  end_x = end;
  filter->fixup_range(ofs_x, end_x);
  op_ret = read_op.iterate(ofs_x, end_x, filter);

  if (op_ret >= 0)
    op_ret = filter->flush();

  perfcounter->tinc(l_rgw_get_lat,
                    (ceph_clock_now() - start_time));
  if (op_ret < 0) {
    goto done_err;
  }

  op_ret = send_response_data(bl, 0, 0);
  if (op_ret < 0) {
    goto done_err;
  }
  return;

done_err:
  send_response_data_error();
}
1809 | ||
1810 | int RGWGetObj::init_common() | |
1811 | { | |
1812 | if (range_str) { | |
1813 | /* range parsed error when prefetch*/ | |
1814 | if (!range_parsed) { | |
1815 | int r = parse_range(range_str, ofs, end, &partial_content); | |
1816 | if (r < 0) | |
1817 | return r; | |
1818 | } | |
1819 | } | |
1820 | if (if_mod) { | |
1821 | if (parse_time(if_mod, &mod_time) < 0) | |
1822 | return -EINVAL; | |
1823 | mod_ptr = &mod_time; | |
1824 | } | |
1825 | ||
1826 | if (if_unmod) { | |
1827 | if (parse_time(if_unmod, &unmod_time) < 0) | |
1828 | return -EINVAL; | |
1829 | unmod_ptr = &unmod_time; | |
1830 | } | |
1831 | ||
1832 | return 0; | |
1833 | } | |
1834 | ||
1835 | int RGWListBuckets::verify_permission() | |
1836 | { | |
1837 | if (!verify_user_permission(s, RGW_PERM_READ)) { | |
1838 | return -EACCES; | |
1839 | } | |
1840 | ||
1841 | return 0; | |
1842 | } | |
1843 | ||
1844 | int RGWGetUsage::verify_permission() | |
1845 | { | |
1846 | if (s->auth.identity->is_anonymous()) { | |
1847 | return -EACCES; | |
1848 | } | |
1849 | ||
1850 | return 0; | |
1851 | } | |
1852 | ||
void RGWListBuckets::execute()
{
  /* Stream the user's bucket list to the client, reading from the backend
   * in chunks of rgw_list_buckets_max_chunk and emitting each chunk as it
   * arrives. Results/errors are reported through op_ret and the
   * send_response_*() callbacks. */
  bool done;
  bool started = false;
  uint64_t total_count = 0;   // buckets emitted so far (enforces 'limit')

  const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  /* Swift account listings also return the account's metadata attrs. */
  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    /* Never request more than the caller's remaining limit. */
    if (limit >= 0) {
      read_count = min(limit - total_count, (uint64_t)max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
			<< s->user->user_id << dendl;
      break;
    }

    /* We need to have stats for all our policies - even if a given policy
     * isn't actually used in a given account. In such situation its usage
     * stats would be simply full of zeros. */
    for (const auto& policy : store->get_zonegroup().placement_targets) {
      policies_stats.emplace(policy.second.name,
                             decltype(policies_stats)::mapped_type());
    }

    std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
    for (const auto& kv : m) {
      const auto& bucket = kv.second;

      global_stats.bytes_used += bucket.size;
      global_stats.bytes_used_rounded += bucket.size_rounded;
      global_stats.objects_count += bucket.count;

      /* operator[] still can create a new entry for storage policy seen
       * for first time. */
      auto& policy_stats = policies_stats[bucket.placement_rule];
      policy_stats.bytes_used += bucket.size;
      policy_stats.bytes_used_rounded += bucket.size_rounded;
      policy_stats.buckets_count++;
      policy_stats.objects_count += bucket.count;
    }
    global_stats.buckets_count += m.size();
    total_count += m.size();

    /* Done when the backend returned a short chunk or we reached 'limit'. */
    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    /* The response header can be sent only once; emit it before the first
     * data chunk. */
    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      /* Remember the last key as the marker for the next backend read. */
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;

      handle_listing_chunk(std::move(buckets));
    }
  } while (is_truncated && !done);

send_end:
  /* Even on early failure the protocol layer expects begin/end callbacks. */
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
1943 | ||
1944 | void RGWGetUsage::execute() | |
1945 | { | |
1946 | uint64_t start_epoch = 0; | |
1947 | uint64_t end_epoch = (uint64_t)-1; | |
1948 | op_ret = get_params(); | |
1949 | if (op_ret < 0) | |
1950 | return; | |
1951 | ||
1952 | if (!start_date.empty()) { | |
1953 | op_ret = utime_t::parse_date(start_date, &start_epoch, NULL); | |
1954 | if (op_ret < 0) { | |
1955 | ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl; | |
1956 | return; | |
1957 | } | |
1958 | } | |
1959 | ||
1960 | if (!end_date.empty()) { | |
1961 | op_ret = utime_t::parse_date(end_date, &end_epoch, NULL); | |
1962 | if (op_ret < 0) { | |
1963 | ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl; | |
1964 | return; | |
1965 | } | |
1966 | } | |
1967 | ||
1968 | uint32_t max_entries = 1000; | |
1969 | ||
1970 | bool is_truncated = true; | |
1971 | ||
1972 | RGWUsageIter usage_iter; | |
1973 | ||
1974 | while (is_truncated) { | |
1975 | op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries, | |
1976 | &is_truncated, usage_iter, usage); | |
1977 | ||
1978 | if (op_ret == -ENOENT) { | |
1979 | op_ret = 0; | |
1980 | is_truncated = false; | |
1981 | } | |
1982 | ||
1983 | if (op_ret < 0) { | |
1984 | return; | |
1985 | } | |
1986 | } | |
1987 | ||
1988 | op_ret = rgw_user_sync_all_stats(store, s->user->user_id); | |
1989 | if (op_ret < 0) { | |
1990 | ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl; | |
1991 | return; | |
1992 | } | |
1993 | ||
1994 | op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage); | |
1995 | if (op_ret < 0) { | |
1996 | cerr << "ERROR: failed to sync user stats: " << std::endl; | |
1997 | return ; | |
1998 | } | |
1999 | ||
2000 | string user_str = s->user->user_id.to_str(); | |
2001 | op_ret = store->cls_user_get_header(user_str, &header); | |
2002 | if (op_ret < 0) { | |
2003 | ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl; | |
2004 | return; | |
2005 | } | |
2006 | ||
2007 | return; | |
2008 | } | |
2009 | ||
2010 | int RGWStatAccount::verify_permission() | |
2011 | { | |
2012 | if (!verify_user_permission(s, RGW_PERM_READ)) { | |
2013 | return -EACCES; | |
2014 | } | |
2015 | ||
2016 | return 0; | |
2017 | } | |
2018 | ||
2019 | void RGWStatAccount::execute() | |
2020 | { | |
2021 | string marker; | |
2022 | bool is_truncated = false; | |
2023 | uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk; | |
2024 | ||
2025 | do { | |
2026 | RGWUserBuckets buckets; | |
2027 | ||
2028 | op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker, | |
2029 | string(), max_buckets, true, &is_truncated); | |
2030 | if (op_ret < 0) { | |
2031 | /* hmm.. something wrong here.. the user was authenticated, so it | |
2032 | should exist */ | |
2033 | ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid=" | |
2034 | << s->user->user_id << dendl; | |
2035 | break; | |
2036 | } else { | |
2037 | /* We need to have stats for all our policies - even if a given policy | |
2038 | * isn't actually used in a given account. In such situation its usage | |
2039 | * stats would be simply full of zeros. */ | |
2040 | for (const auto& policy : store->get_zonegroup().placement_targets) { | |
2041 | policies_stats.emplace(policy.second.name, | |
2042 | decltype(policies_stats)::mapped_type()); | |
2043 | } | |
2044 | ||
2045 | std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets(); | |
2046 | for (const auto& kv : m) { | |
2047 | const auto& bucket = kv.second; | |
2048 | ||
2049 | global_stats.bytes_used += bucket.size; | |
2050 | global_stats.bytes_used_rounded += bucket.size_rounded; | |
2051 | global_stats.objects_count += bucket.count; | |
2052 | ||
2053 | /* operator[] still can create a new entry for storage policy seen | |
2054 | * for first time. */ | |
2055 | auto& policy_stats = policies_stats[bucket.placement_rule]; | |
2056 | policy_stats.bytes_used += bucket.size; | |
2057 | policy_stats.bytes_used_rounded += bucket.size_rounded; | |
2058 | policy_stats.buckets_count++; | |
2059 | policy_stats.objects_count += bucket.count; | |
2060 | } | |
2061 | global_stats.buckets_count += m.size(); | |
2062 | ||
2063 | } | |
2064 | } while (is_truncated); | |
2065 | } | |
2066 | ||
int RGWGetBucketVersioning::verify_permission()
{
  /* Allowed for the bucket owner or via s3:GetBucketVersioning in policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
}
2071 | ||
void RGWGetBucketVersioning::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2076 | ||
void RGWGetBucketVersioning::execute()
{
  /* Copy the versioning state out of the already-loaded bucket info;
   * the response formatter reads these two members. */
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
}
2082 | ||
int RGWSetBucketVersioning::verify_permission()
{
  /* Allowed for the bucket owner or via s3:PutBucketVersioning in policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
}
2087 | ||
void RGWSetBucketVersioning::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2092 | ||
2093 | void RGWSetBucketVersioning::execute() | |
2094 | { | |
2095 | op_ret = get_params(); | |
2096 | if (op_ret < 0) | |
2097 | return; | |
2098 | ||
2099 | if (!store->is_meta_master()) { | |
2100 | op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr); | |
2101 | if (op_ret < 0) { | |
2102 | ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; | |
2103 | return; | |
2104 | } | |
2105 | } | |
2106 | ||
2107 | op_ret = retry_raced_bucket_write(store, s, [this] { | |
2108 | if (enable_versioning) { | |
2109 | s->bucket_info.flags |= BUCKET_VERSIONED; | |
2110 | s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED; | |
2111 | } else { | |
2112 | s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED); | |
2113 | } | |
2114 | ||
2115 | return store->put_bucket_instance_info(s->bucket_info, false, real_time(), | |
2116 | &s->bucket_attrs); | |
2117 | }); | |
2118 | ||
2119 | if (op_ret < 0) { | |
2120 | ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name | |
2121 | << " returned err=" << op_ret << dendl; | |
2122 | return; | |
2123 | } | |
2124 | } | |
2125 | ||
int RGWGetBucketWebsite::verify_permission()
{
  /* Allowed for the bucket owner or via s3:GetBucketWebsite in policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
}
2130 | ||
void RGWGetBucketWebsite::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2135 | ||
void RGWGetBucketWebsite::execute()
{
  /* The website config itself lives in s->bucket_info.website_conf and is
   * rendered by the response layer; here we only fail the request if the
   * bucket has no website configuration at all. */
  if (!s->bucket_info.has_website) {
    op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
  }
}
2142 | ||
int RGWSetBucketWebsite::verify_permission()
{
  /* Allowed for the bucket owner or via s3:PutBucketWebsite in policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
}
2147 | ||
void RGWSetBucketWebsite::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2152 | ||
void RGWSetBucketWebsite::execute()
{
  /* Install the parsed website configuration on the bucket and persist the
   * updated bucket instance info. Errors are reported through op_ret. */
  op_ret = get_params();

  if (op_ret < 0)
    return;

  /* Only the metadata master zone applies bucket metadata changes directly;
   * other zones forward the request to the master first. */
  if (!store->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  /* NOTE(review): retry_raced_bucket_write() presumably re-runs the lambda
   * when the bucket instance write races - confirm against its definition. */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      s->bucket_info.has_website = true;
      s->bucket_info.website_conf = website_conf;
      op_ret = store->put_bucket_instance_info(s->bucket_info, false,
					       real_time(), &s->bucket_attrs);
      return op_ret;
    });

  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2181 | ||
int RGWDeleteBucketWebsite::verify_permission()
{
  /* Allowed for the bucket owner or via s3:DeleteBucketWebsite in policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
}
2186 | ||
void RGWDeleteBucketWebsite::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2191 | ||
void RGWDeleteBucketWebsite::execute()
{
  /* Clear the website flag and reset the config to defaults, then persist
   * the updated bucket instance info. Errors are reported through op_ret. */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      s->bucket_info.has_website = false;
      s->bucket_info.website_conf = RGWBucketWebsiteConf();
      op_ret = store->put_bucket_instance_info(s->bucket_info, false,
					       real_time(), &s->bucket_attrs);
      return op_ret;
    });
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2206 | ||
2207 | int RGWStatBucket::verify_permission() | |
2208 | { | |
2209 | // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission. | |
2210 | if (!verify_bucket_permission(s, rgw::IAM::s3ListBucket)) { | |
2211 | return -EACCES; | |
2212 | } | |
2213 | ||
2214 | return 0; | |
2215 | } | |
2216 | ||
void RGWStatBucket::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2221 | ||
void RGWStatBucket::execute()
{
  /* Fetch up-to-date stats for the requested bucket into 'bucket'. */
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  /* update_containers_stats() works on a map, so wrap our single bucket. */
  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  op_ret = store->update_containers_stats(m);
  /* A zero return means no entry was updated for our bucket.
   * NOTE(review): mapping that case to -EEXIST looks odd (-ENOENT would be
   * the expected code) - confirm callers depend on it before changing. */
  if (! op_ret)
    op_ret = -EEXIST;
  /* Positive return: stats were filled in; extract our bucket's entry. */
  if (op_ret > 0) {
    op_ret = 0;
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;
    } else {
      op_ret = -EINVAL;
    }
  }
}
2246 | ||
2247 | int RGWListBucket::verify_permission() | |
2248 | { | |
2249 | op_ret = get_params(); | |
2250 | if (op_ret < 0) { | |
2251 | return op_ret; | |
2252 | } | |
2253 | if (!prefix.empty()) | |
2254 | s->env.emplace("s3:prefix", prefix); | |
2255 | ||
2256 | if (!delimiter.empty()) | |
2257 | s->env.emplace("s3:delimiter", delimiter); | |
2258 | ||
2259 | s->env.emplace("s3:max-keys", std::to_string(max)); | |
2260 | ||
2261 | if (!verify_bucket_permission(s, | |
2262 | list_versions ? | |
2263 | rgw::IAM::s3ListBucketVersions : | |
2264 | rgw::IAM::s3ListBucket)) { | |
2265 | return -EACCES; | |
2266 | } | |
2267 | ||
2268 | return 0; | |
2269 | } | |
2270 | ||
2271 | int RGWListBucket::parse_max_keys() | |
2272 | { | |
2273 | if (!max_keys.empty()) { | |
2274 | char *endptr; | |
2275 | max = strtol(max_keys.c_str(), &endptr, 10); | |
2276 | if (endptr) { | |
2277 | while (*endptr && isspace(*endptr)) // ignore white space | |
2278 | endptr++; | |
2279 | if (*endptr) { | |
2280 | return -EINVAL; | |
2281 | } | |
2282 | } | |
2283 | } else { | |
2284 | max = default_max; | |
2285 | } | |
2286 | ||
2287 | return 0; | |
2288 | } | |
2289 | ||
void RGWListBucket::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2294 | ||
void RGWListBucket::execute()
{
  /* List objects in the bucket into 'objs'/'common_prefixes' according to
   * prefix/delimiter/marker parameters. Errors go through op_ret. */
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  /* Swift container listings also report container stats. */
  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  RGWRados::Bucket target(store, s->bucket_info);
  /* A non-negative shard_id restricts the listing to one index shard. */
  if (shard_id >= 0) {
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    next_marker = list_op.get_next_marker();
  }
}
2329 | ||
int RGWGetBucketLogging::verify_permission()
{
  /* Allowed for the bucket owner or via s3:GetBucketLogging in policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
}
2334 | ||
int RGWGetBucketLocation::verify_permission()
{
  /* Allowed for the bucket owner or via s3:GetBucketLocation in policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
}
2339 | ||
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Cross-tenant bucket creation is not allowed. */
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }
  /* A negative max_buckets disables bucket creation for this user. */
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  /* Zero max_buckets skips the quota check; otherwise count the user's
   * existing buckets and enforce the limit. */
  if (s->user->max_buckets) {
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
				   marker, string(), s->user->max_buckets,
				   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2382 | ||
/* Forward the current client request to the master zonegroup's endpoint so
 * that metadata mutations are serialized there. @in_data carries the
 * request body; if @jp is non-null, the master's response is parsed into it.
 * @objv and @forward_info (optional) are passed through to the REST
 * connection. Returns 0 on success, negative error code otherwise. */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
				    RGWRados *store, bufferlist& in_data,
				    JSONParser *jp, req_info *forward_info)
{
  if (!store->rest_master_conn) {
    ldout(s->cct, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
                                             objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2408 | ||
void RGWCreateBucket::pre_exec()
{
  /* Common bucket-op preamble (shared pre-exec hook). */
  rgw_bucket_object_pre_exec(s);
}
2413 | ||
2414 | static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs, | |
2415 | map<string, bufferlist>& out_attrs, | |
2416 | map<string, bufferlist>& out_rmattrs) | |
2417 | { | |
2418 | for (const auto& kv : orig_attrs) { | |
2419 | const string& name = kv.first; | |
2420 | ||
2421 | /* Check if the attr is user-defined metadata item. */ | |
2422 | if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1, | |
2423 | RGW_ATTR_META_PREFIX) == 0) { | |
2424 | /* For the objects all existing meta attrs have to be removed. */ | |
2425 | out_rmattrs[name] = kv.second; | |
2426 | } else if (out_attrs.find(name) == std::end(out_attrs)) { | |
2427 | out_attrs[name] = kv.second; | |
2428 | } | |
2429 | } | |
2430 | } | |
2431 | ||
/* Fuse resource metadata basing on original attributes in @orig_attrs, set
 * of _custom_ attribute names to remove in @rmattr_names and attributes in
 * @out_attrs. Place results in @out_attrs.
 *
 * NOTE: it's supposed that all special attrs already present in @out_attrs
 * will be preserved without any change. Special attributes are those which
 * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
 * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
                                  const set<string>& rmattr_names,
                                  map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : orig_attrs) {
    const string& name = kv.first;

    /* Check if the attr is user-defined metadata item. */
    if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
                     RGW_ATTR_META_PREFIX) == 0) {
      /* For the buckets all existing meta attrs are preserved,
         except those that are listed in rmattr_names. */
      if (rmattr_names.find(name) != std::end(rmattr_names)) {
        const auto aiter = out_attrs.find(name);

        if (aiter != std::end(out_attrs)) {
          out_attrs.erase(aiter);
        }
      } else {
        /* emplace() won't alter the map if the key is already present.
         * This behaviour is fully intentional here. */
        out_attrs.emplace(kv);
      }
    } else if (out_attrs.find(name) == std::end(out_attrs)) {
      /* Non-meta attrs are carried over only if not already supplied. */
      out_attrs[name] = kv.second;
    }
  }
}
2468 | ||
2469 | ||
2470 | static void populate_with_generic_attrs(const req_state * const s, | |
2471 | map<string, bufferlist>& out_attrs) | |
2472 | { | |
2473 | for (const auto& kv : s->generic_attrs) { | |
2474 | bufferlist& attrbl = out_attrs[kv.first]; | |
2475 | const string& val = kv.second; | |
2476 | attrbl.clear(); | |
2477 | attrbl.append(val.c_str(), val.size() + 1); | |
2478 | } | |
2479 | } | |
2480 | ||
2481 | ||
/* Extract Swift quota headers (max objects / max size) from @add_attrs and
 * @rmattr_names into @quota, removing the consumed entries from @add_attrs.
 * Sets *@quota_extracted (if provided) when any quota field was touched.
 * Returns 0 on success, -EINVAL when a quota value fails to parse. */
static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
                                 const std::set<std::string>& rmattr_names,
                                 RGWQuotaInfo& quota,
                                 bool * quota_extracted = nullptr)
{
  bool extracted = false;

  /* Put new limit on max objects. */
  auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
  std::string err;
  if (std::end(add_attrs) != iter) {
    quota.max_objects =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  /* Put new limit on bucket (container) size. */
  iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
  if (iter != add_attrs.end()) {
    quota.max_size =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  /* Names listed for removal reset the corresponding limit to unlimited. */
  for (const auto& name : rmattr_names) {
    /* Remove limit on max objects. */
    if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
      quota.max_objects = -1;
      extracted = true;
    }

    /* Remove limit on max bucket size. */
    if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
      quota.max_size = -1;
      extracted = true;
    }
  }

  /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
  quota.check_on_raw = true;
  quota.enabled = quota.max_size > 0 || quota.max_objects > 0;

  if (quota_extracted) {
    *quota_extracted = extracted;
  }

  return 0;
}
2538 | ||
2539 | ||
/* Extract Swift static-website attributes from @add_attrs into @ws_conf,
 * removing the consumed entries from @add_attrs. Names listed in
 * @rmattr_names reset the corresponding config field to empty. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. */
  const auto mapping = {
    std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    auto& target = kv.second;

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  /* The listings flag is textual ("true"/"false"); convert it separately. */
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2578 | ||
2579 | ||
2580 | void RGWCreateBucket::execute() | |
2581 | { | |
2582 | RGWAccessControlPolicy old_policy(s->cct); | |
2583 | buffer::list aclbl; | |
2584 | buffer::list corsbl; | |
2585 | bool existed; | |
2586 | string bucket_name; | |
2587 | rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name); | |
2588 | rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name); | |
2589 | obj_version objv, *pobjv = NULL; | |
2590 | ||
2591 | op_ret = get_params(); | |
2592 | if (op_ret < 0) | |
2593 | return; | |
2594 | ||
2595 | if (!location_constraint.empty() && | |
2596 | !store->has_zonegroup_api(location_constraint)) { | |
2597 | ldout(s->cct, 0) << "location constraint (" << location_constraint << ")" | |
2598 | << " can't be found." << dendl; | |
2599 | op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; | |
2600 | s->err.message = "The specified location-constraint is not valid"; | |
2601 | return; | |
2602 | } | |
2603 | ||
2604 | if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() && | |
2605 | store->get_zonegroup().api_name != location_constraint) { | |
2606 | ldout(s->cct, 0) << "location constraint (" << location_constraint << ")" | |
2607 | << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")" | |
2608 | << dendl; | |
2609 | op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; | |
2610 | s->err.message = "The specified location-constraint is not valid"; | |
2611 | return; | |
2612 | } | |
2613 | ||
2614 | const auto& zonegroup = store->get_zonegroup(); | |
2615 | if (!placement_rule.empty() && | |
2616 | !zonegroup.placement_targets.count(placement_rule)) { | |
2617 | ldout(s->cct, 0) << "placement target (" << placement_rule << ")" | |
2618 | << " doesn't exist in the placement targets of zonegroup" | |
2619 | << " (" << store->get_zonegroup().api_name << ")" << dendl; | |
2620 | op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; | |
2621 | s->err.message = "The specified placement target does not exist"; | |
2622 | return; | |
2623 | } | |
2624 | ||
2625 | /* we need to make sure we read bucket info, it's not read before for this | |
2626 | * specific request */ | |
2627 | RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx); | |
2628 | op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, | |
2629 | s->bucket_info, NULL, &s->bucket_attrs); | |
2630 | if (op_ret < 0 && op_ret != -ENOENT) | |
2631 | return; | |
2632 | s->bucket_exists = (op_ret != -ENOENT); | |
2633 | ||
2634 | s->bucket_owner.set_id(s->user->user_id); | |
2635 | s->bucket_owner.set_name(s->user->display_name); | |
2636 | if (s->bucket_exists) { | |
2637 | int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info, | |
2638 | s->bucket_attrs, &old_policy); | |
2639 | if (r >= 0) { | |
2640 | if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) { | |
2641 | op_ret = -EEXIST; | |
2642 | return; | |
2643 | } | |
2644 | } | |
2645 | } | |
2646 | ||
2647 | RGWBucketInfo master_info; | |
2648 | rgw_bucket *pmaster_bucket; | |
2649 | uint32_t *pmaster_num_shards; | |
2650 | real_time creation_time; | |
2651 | ||
2652 | if (!store->is_meta_master()) { | |
2653 | JSONParser jp; | |
2654 | op_ret = forward_request_to_master(s, NULL, store, in_data, &jp); | |
2655 | if (op_ret < 0) { | |
2656 | return; | |
2657 | } | |
2658 | ||
2659 | JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp); | |
2660 | JSONDecoder::decode_json("object_ver", objv, &jp); | |
2661 | JSONDecoder::decode_json("bucket_info", master_info, &jp); | |
2662 | ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl; | |
2663 | ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl; | |
2664 | pmaster_bucket= &master_info.bucket; | |
2665 | creation_time = master_info.creation_time; | |
2666 | pmaster_num_shards = &master_info.num_shards; | |
2667 | pobjv = &objv; | |
2668 | } else { | |
2669 | pmaster_bucket = NULL; | |
2670 | pmaster_num_shards = NULL; | |
2671 | } | |
2672 | ||
2673 | string zonegroup_id; | |
2674 | ||
2675 | if (s->system_request) { | |
2676 | zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup"); | |
2677 | if (zonegroup_id.empty()) { | |
2678 | zonegroup_id = store->get_zonegroup().get_id(); | |
2679 | } | |
2680 | } else { | |
2681 | zonegroup_id = store->get_zonegroup().get_id(); | |
2682 | } | |
2683 | ||
2684 | if (s->bucket_exists) { | |
2685 | string selected_placement_rule; | |
2686 | rgw_bucket bucket; | |
2687 | bucket.tenant = s->bucket_tenant; | |
2688 | bucket.name = s->bucket_name; | |
2689 | op_ret = store->select_bucket_placement(*(s->user), zonegroup_id, | |
2690 | placement_rule, | |
2691 | &selected_placement_rule, nullptr); | |
2692 | if (selected_placement_rule != s->bucket_info.placement_rule) { | |
2693 | op_ret = -EEXIST; | |
2694 | return; | |
2695 | } | |
2696 | } | |
2697 | ||
2698 | /* Encode special metadata first as we're using std::map::emplace under | |
2699 | * the hood. This method will add the new items only if the map doesn't | |
2700 | * contain such keys yet. */ | |
2701 | policy.encode(aclbl); | |
2702 | emplace_attr(RGW_ATTR_ACL, std::move(aclbl)); | |
2703 | ||
2704 | if (has_cors) { | |
2705 | cors_config.encode(corsbl); | |
2706 | emplace_attr(RGW_ATTR_CORS, std::move(corsbl)); | |
2707 | } | |
2708 | ||
2709 | RGWQuotaInfo quota_info; | |
2710 | const RGWQuotaInfo * pquota_info = nullptr; | |
2711 | if (need_metadata_upload()) { | |
2712 | /* It's supposed that following functions WILL NOT change any special | |
2713 | * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */ | |
2714 | op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); | |
2715 | if (op_ret < 0) { | |
2716 | return; | |
2717 | } | |
2718 | prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs); | |
2719 | populate_with_generic_attrs(s, attrs); | |
2720 | ||
2721 | op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info); | |
2722 | if (op_ret < 0) { | |
2723 | return; | |
2724 | } else { | |
2725 | pquota_info = "a_info; | |
2726 | } | |
2727 | ||
2728 | /* Web site of Swift API. */ | |
2729 | filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf); | |
2730 | s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty(); | |
2731 | } | |
2732 | ||
2733 | s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */ | |
2734 | s->bucket.name = s->bucket_name; | |
2735 | ||
2736 | /* Handle updates of the metadata for Swift's object versioning. */ | |
2737 | if (swift_ver_location) { | |
2738 | s->bucket_info.swift_ver_location = *swift_ver_location; | |
2739 | s->bucket_info.swift_versioning = (! swift_ver_location->empty()); | |
2740 | } | |
2741 | ||
2742 | op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id, | |
2743 | placement_rule, s->bucket_info.swift_ver_location, | |
2744 | pquota_info, attrs, | |
2745 | info, pobjv, &ep_objv, creation_time, | |
2746 | pmaster_bucket, pmaster_num_shards, true); | |
2747 | /* continue if EEXIST and create_bucket will fail below. this way we can | |
2748 | * recover from a partial create by retrying it. */ | |
2749 | ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl; | |
2750 | ||
2751 | if (op_ret && op_ret != -EEXIST) | |
2752 | return; | |
2753 | ||
2754 | existed = (op_ret == -EEXIST); | |
2755 | ||
2756 | if (existed) { | |
2757 | /* bucket already existed, might have raced with another bucket creation, or | |
2758 | * might be partial bucket creation that never completed. Read existing bucket | |
2759 | * info, verify that the reported bucket owner is the current user. | |
2760 | * If all is ok then update the user's list of buckets. | |
2761 | * Otherwise inform client about a name conflict. | |
2762 | */ | |
2763 | if (info.owner.compare(s->user->user_id) != 0) { | |
2764 | op_ret = -EEXIST; | |
2765 | return; | |
2766 | } | |
2767 | s->bucket = info.bucket; | |
2768 | } | |
2769 | ||
2770 | op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket, | |
2771 | info.creation_time, false); | |
2772 | if (op_ret && !existed && op_ret != -EEXIST) { | |
2773 | /* if it exists (or previously existed), don't remove it! */ | |
2774 | op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant, | |
2775 | s->bucket.name); | |
2776 | if (op_ret < 0) { | |
2777 | ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret | |
2778 | << dendl; | |
2779 | } | |
2780 | } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) { | |
2781 | op_ret = -ERR_BUCKET_EXISTS; | |
2782 | } | |
2783 | ||
2784 | if (need_metadata_upload() && existed) { | |
2785 | /* OK, it looks we lost race with another request. As it's required to | |
2786 | * handle metadata fusion and upload, the whole operation becomes very | |
2787 | * similar in nature to PutMetadataBucket. However, as the attrs may | |
2788 | * changed in the meantime, we have to refresh. */ | |
2789 | short tries = 0; | |
2790 | do { | |
2791 | RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx); | |
2792 | RGWBucketInfo binfo; | |
2793 | map<string, bufferlist> battrs; | |
2794 | ||
2795 | op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, | |
2796 | binfo, nullptr, &battrs); | |
2797 | if (op_ret < 0) { | |
2798 | return; | |
2799 | } else if (binfo.owner.compare(s->user->user_id) != 0) { | |
2800 | /* New bucket doesn't belong to the account we're operating on. */ | |
2801 | op_ret = -EEXIST; | |
2802 | return; | |
2803 | } else { | |
2804 | s->bucket_info = binfo; | |
2805 | s->bucket_attrs = battrs; | |
2806 | } | |
2807 | ||
2808 | attrs.clear(); | |
2809 | ||
2810 | op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); | |
2811 | if (op_ret < 0) { | |
2812 | return; | |
2813 | } | |
2814 | prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs); | |
2815 | populate_with_generic_attrs(s, attrs); | |
2816 | op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota); | |
2817 | if (op_ret < 0) { | |
2818 | return; | |
2819 | } | |
2820 | ||
2821 | /* Handle updates of the metadata for Swift's object versioning. */ | |
2822 | if (swift_ver_location) { | |
2823 | s->bucket_info.swift_ver_location = *swift_ver_location; | |
2824 | s->bucket_info.swift_versioning = (! swift_ver_location->empty()); | |
2825 | } | |
2826 | ||
2827 | /* Web site of Swift API. */ | |
2828 | filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf); | |
2829 | s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty(); | |
2830 | ||
2831 | /* This will also set the quota on the bucket. */ | |
2832 | op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, | |
2833 | &s->bucket_info.objv_tracker); | |
2834 | } while (op_ret == -ECANCELED && tries++ < 20); | |
2835 | ||
2836 | /* Restore the proper return code. */ | |
2837 | if (op_ret >= 0) { | |
2838 | op_ret = -ERR_BUCKET_EXISTS; | |
2839 | } | |
2840 | } | |
2841 | } | |
2842 | ||
2843 | int RGWDeleteBucket::verify_permission() | |
2844 | { | |
2845 | if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucket)) { | |
2846 | return -EACCES; | |
2847 | } | |
2848 | ||
2849 | return 0; | |
2850 | } | |
2851 | ||
/* Common pre-execution hook shared by bucket/object operations
 * (request logging / state bookkeeping before execute() runs). */
void RGWDeleteBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2856 | ||
/* Delete the bucket named in the request.
 *
 * Flow: validate request state, sync user stats, verify the bucket is empty,
 * forward the operation to the metadata master zone when this zone is not the
 * master, abort any in-flight multipart uploads, delete the bucket, and
 * finally unlink it from the owning user. The result is reported through
 * op_ret (0 on success, negative errno / RGW error code on failure). */
void RGWDeleteBucket::execute()
{
  op_ret = -EINVAL;

  if (s->bucket_name.empty())
    return;

  if (!s->bucket_exists) {
    ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  /* track the entry-point object version so the delete can detect races */
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  if (s->system_request) {
    /* a system (multisite/admin) request may pin the exact version it wants
     * to delete via the tag/ver query parameters */
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldout(s->cct, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  /* best-effort: flush bucket stats into the user's totals before removal */
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  if (!store->is_meta_master()) {
    /* bucket metadata is owned by the master zone; forward the delete there
     * first and only proceed locally if it succeeded */
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      /* NOTE(review): prefix/delimiter were just declared empty above, so
       * this condition can never fire here — looks like a leftover from code
       * copied out of a listing path; confirm before removing. */
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  /* clean up any incomplete multipart uploads so their parts don't leak */
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  if (op_ret == 0) {
    /* remove the bucket from the owner's bucket list */
    op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  }

  if (op_ret < 0) {
    return;
  }


}
2958 | ||
/* Authorize a PUT object request.
 *
 * For server-side copy (copy_source set) the source object must first be
 * readable by the requester: its ACL and any bucket policy are evaluated,
 * with admin identities bypassing the check. Then the destination is
 * authorized — an IAM policy decision of Allow/Deny is final; Pass falls
 * through to the classic ACL write check. Returns 0 when permitted,
 * -EACCES otherwise. */
int RGWPutObj::verify_permission()
{
  if (copy_source) {

    RGWAccessControlPolicy cs_acl(s->cct);
    optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    /* mark the source atomic and prefetch its head for the later read */
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, policy,
                        cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy) {
        /* versioned reads require s3:GetObjectVersion rather than
         * s3:GetObject */
        auto e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                                RGW_PERM_READ)) {
          /* policy said nothing; fall back to the source object's ACL */
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  if (s->iam_policy) {
    /* destination bucket policy: Allow/Deny are conclusive, Pass falls
     * through to the ACL check below */
    auto e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    }
  }

  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
3018 | ||
3019 | void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){ | |
3020 | *_mp = ∓ | |
3021 | } | |
3022 | ||
/* Prepare the processor for writing one part of a multipart upload.
 *
 * Reads uploadId/partNumber from the request, initializes the multipart
 * descriptor (optionally salted with oid_rand for write-retry after a race),
 * and sets up the object manifest so all part stripes share a common prefix.
 * Returns 0 on success, negative errno on bad input or RADOS failure. */
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
  string oid = obj_str;
  upload_id = s->info.args.get("uploadId");
  if (!oid_rand) {
    mp.init(oid, upload_id);
  } else {
    /* retry path: salt the meta oid so we don't collide with the racer */
    mp.init(oid, upload_id, *oid_rand);
  }

  part_num = s->info.args.get("partNumber");
  if (part_num.empty()) {
    ldout(s->cct, 10) << "part number is empty" << dendl;
    return -EINVAL;
  }

  string err;
  uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);

  if (!err.empty()) {
    ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
    return -EINVAL;
  }

  /* all stripes of this part are named "<oid>.<upload-id-or-salt>..." */
  string upload_prefix = oid + ".";

  if (!oid_rand) {
    upload_prefix.append(upload_id);
  } else {
    upload_prefix.append(*oid_rand);
  }

  rgw_obj target_obj;
  target_obj.init(bucket, oid);

  manifest.set_prefix(upload_prefix);

  /* parts are striped at rgw_obj_stripe_size; 'num' identifies this part */
  manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);

  int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
  if (r < 0) {
    return r;
  }

  cur_obj = manifest_gen.get_cur_obj(store);
  rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
  /* keep bucket-index shard placement keyed on the final object name */
  head_obj.index_hash_source = obj_str;

  r = prepare_init(store, NULL);
  if (r < 0) {
    return r;
  }

  return 0;
}
3078 | ||
/* Finalize one multipart part: flush buffered data, write the part's head
 * object metadata, and record an RGWUploadPartInfo entry in the omap of the
 * upload's meta object so complete-multipart can later assemble the object.
 * Returns 0 on success, negative errno on failure. */
int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
                                              const string& etag,
                                              real_time *mtime, real_time set_mtime,
                                              map<string, bufferlist>& attrs,
                                              real_time delete_at,
                                              const char *if_match,
                                              const char *if_nomatch, const string *user_data, rgw_zone_set *zones_trace)
{
  complete_writing_data();

  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
  /* individual parts are never versioned; versioning applies to the
   * assembled object only */
  op_target.set_versioning_disabled(true);
  RGWRados::Object::Write head_obj_op(&op_target);

  head_obj_op.meta.set_mtime = set_mtime;
  head_obj_op.meta.mtime = mtime;
  head_obj_op.meta.owner = s->owner.get_id();
  head_obj_op.meta.delete_at = delete_at;
  head_obj_op.meta.zones_trace = zones_trace;
  head_obj_op.meta.modify_tail = true;

  int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
  if (r < 0)
    return r;

  bufferlist bl;
  RGWUploadPartInfo info;
  string p = "part.";
  /* v2 upload ids use zero-padded part keys so omap listing sorts them
   * numerically */
  bool sorted_omap = is_v2_upload_id(upload_id);

  if (sorted_omap) {
    string err;
    int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
    if (!err.empty()) {
      dout(10) << "bad part number specified: " << part_num << dendl;
      return -EINVAL;
    }
    char buf[32];
    snprintf(buf, sizeof(buf), "%08d", part_num_int);
    p.append(buf);
  } else {
    p.append(part_num);
  }
  info.num = atoi(part_num.c_str());
  info.etag = etag;
  info.size = obj_len;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();
  info.manifest = manifest;

  bool compressed;
  /* carry the part's compression metadata into the part record */
  r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  if (r < 0) {
    dout(1) << "cannot get compression info" << dendl;
    return r;
  }

  ::encode(info, bl);

  string multipart_meta_obj = mp.get_meta();

  rgw_obj meta_obj;
  meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
  /* the upload's meta object lives in the extra-data pool */
  meta_obj.set_in_extra_data(true);

  rgw_raw_obj raw_meta_obj;

  store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);

  /* register this part under its omap key on the meta object */
  r = store->omap_set(raw_meta_obj, p, bl);

  return r;
}
3152 | ||
3153 | RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart) | |
3154 | { | |
3155 | RGWPutObjProcessor *processor; | |
3156 | ||
3157 | bool multipart = s->info.args.exists("uploadId"); | |
3158 | ||
3159 | uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size; | |
3160 | ||
3161 | if (!multipart) { | |
3162 | processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled()); | |
3163 | (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch); | |
3164 | (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id); | |
3165 | } else { | |
3166 | processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s); | |
3167 | } | |
3168 | ||
3169 | if (is_multipart) { | |
3170 | *is_multipart = multipart; | |
3171 | } | |
3172 | ||
3173 | return processor; | |
3174 | } | |
3175 | ||
/* Release a processor previously returned by select_processor(). */
void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
{
  delete processor;
}
3180 | ||
/* Common pre-execution hook shared by bucket/object operations
 * (request logging / state bookkeeping before execute() runs). */
void RGWPutObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3185 | ||
3186 | class RGWPutObj_CB : public RGWGetDataCB | |
3187 | { | |
3188 | RGWPutObj *op; | |
3189 | public: | |
3190 | RGWPutObj_CB(RGWPutObj *_op) : op(_op) {} | |
3191 | ~RGWPutObj_CB() override {} | |
3192 | ||
3193 | int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override { | |
3194 | return op->get_data_cb(bl, bl_ofs, bl_len); | |
3195 | } | |
3196 | }; | |
3197 | ||
3198 | int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) | |
3199 | { | |
3200 | bufferlist bl_tmp; | |
3201 | bl.copy(bl_ofs, bl_len, bl_tmp); | |
3202 | ||
3203 | bl_aux.append(bl_tmp); | |
3204 | ||
3205 | return bl_len; | |
3206 | } | |
3207 | ||
3208 | int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) | |
3209 | { | |
3210 | RGWPutObj_CB cb(this); | |
3211 | RGWGetDataCB* filter = &cb; | |
3212 | boost::optional<RGWGetObj_Decompress> decompress; | |
3213 | std::unique_ptr<RGWGetDataCB> decrypt; | |
3214 | RGWCompressionInfo cs_info; | |
3215 | map<string, bufferlist> attrs; | |
3216 | map<string, bufferlist>::iterator attr_iter; | |
3217 | int ret = 0; | |
3218 | ||
3219 | uint64_t obj_size; | |
3220 | int64_t new_ofs, new_end; | |
3221 | ||
3222 | new_ofs = fst; | |
3223 | new_end = lst; | |
3224 | ||
3225 | rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id); | |
3226 | rgw_obj obj(copy_source_bucket_info.bucket, obj_key); | |
3227 | ||
3228 | RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj); | |
3229 | RGWRados::Object::Read read_op(&op_target); | |
3230 | read_op.params.obj_size = &obj_size; | |
3231 | read_op.params.attrs = &attrs; | |
3232 | ||
3233 | ret = read_op.prepare(); | |
3234 | if (ret < 0) | |
3235 | return ret; | |
3236 | ||
3237 | bool need_decompress; | |
3238 | op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info); | |
3239 | if (op_ret < 0) { | |
3240 | lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl; | |
3241 | return -EIO; | |
3242 | } | |
3243 | ||
3244 | bool partial_content = true; | |
3245 | if (need_decompress) | |
3246 | { | |
3247 | obj_size = cs_info.orig_size; | |
3248 | decompress.emplace(s->cct, &cs_info, partial_content, filter); | |
3249 | filter = &*decompress; | |
3250 | } | |
3251 | ||
3252 | attr_iter = attrs.find(RGW_ATTR_MANIFEST); | |
3253 | op_ret = this->get_decrypt_filter(&decrypt, | |
3254 | filter, | |
3255 | attrs, | |
3256 | attr_iter != attrs.end() ? &(attr_iter->second) : nullptr); | |
3257 | if (decrypt != nullptr) { | |
3258 | filter = decrypt.get(); | |
3259 | } | |
3260 | if (op_ret < 0) { | |
3261 | return ret; | |
3262 | } | |
3263 | ||
3264 | ret = read_op.range_to_ofs(obj_size, new_ofs, new_end); | |
3265 | if (ret < 0) | |
3266 | return ret; | |
3267 | ||
3268 | filter->fixup_range(new_ofs, new_end); | |
3269 | ret = read_op.iterate(new_ofs, new_end, filter); | |
3270 | ||
3271 | if (ret >= 0) | |
3272 | ret = filter->flush(); | |
3273 | ||
3274 | bl.claim_append(bl_aux); | |
3275 | ||
3276 | return ret; | |
3277 | } | |
3278 | ||
3279 | // special handling for compression type = "random" with multipart uploads | |
3280 | static CompressorRef get_compressor_plugin(const req_state *s, | |
3281 | const std::string& compression_type) | |
3282 | { | |
3283 | if (compression_type != "random") { | |
3284 | return Compressor::create(s->cct, compression_type); | |
3285 | } | |
3286 | ||
3287 | bool is_multipart{false}; | |
3288 | const auto& upload_id = s->info.args.get("uploadId", &is_multipart); | |
3289 | ||
3290 | if (!is_multipart) { | |
3291 | return Compressor::create(s->cct, compression_type); | |
3292 | } | |
3293 | ||
3294 | // use a hash of the multipart upload id so all parts use the same plugin | |
3295 | const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST; | |
3296 | if (alg == Compressor::COMP_ALG_NONE) { | |
3297 | return nullptr; | |
3298 | } | |
3299 | return Compressor::create(s->cct, alg); | |
3300 | } | |
3301 | ||
3302 | void RGWPutObj::execute() | |
3303 | { | |
3304 | RGWPutObjProcessor *processor = NULL; | |
3305 | RGWPutObjDataProcessor *filter = nullptr; | |
3306 | std::unique_ptr<RGWPutObjDataProcessor> encrypt; | |
3307 | char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1]; | |
3308 | char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; | |
3309 | char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; | |
3310 | unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE]; | |
3311 | MD5 hash; | |
3312 | bufferlist bl, aclbl, bs; | |
3313 | int len; | |
3314 | map<string, string>::iterator iter; | |
3315 | bool multipart; | |
3316 | ||
3317 | off_t fst; | |
3318 | off_t lst; | |
3319 | const auto& compression_type = store->get_zone_params().get_compression_type( | |
3320 | s->bucket_info.placement_rule); | |
3321 | CompressorRef plugin; | |
3322 | boost::optional<RGWPutObj_Compress> compressor; | |
3323 | ||
3324 | bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL); | |
3325 | perfcounter->inc(l_rgw_put); | |
3326 | op_ret = -EINVAL; | |
3327 | if (s->object.empty()) { | |
3328 | goto done; | |
3329 | } | |
3330 | ||
3331 | if (!s->bucket_exists) { | |
3332 | op_ret = -ERR_NO_SUCH_BUCKET; | |
3333 | return; | |
3334 | } | |
3335 | ||
3336 | op_ret = get_params(); | |
3337 | if (op_ret < 0) { | |
3338 | ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl; | |
3339 | goto done; | |
3340 | } | |
3341 | ||
3342 | op_ret = get_system_versioning_params(s, &olh_epoch, &version_id); | |
3343 | if (op_ret < 0) { | |
3344 | ldout(s->cct, 20) << "get_system_versioning_params() returned ret=" | |
3345 | << op_ret << dendl; | |
3346 | goto done; | |
3347 | } | |
3348 | ||
3349 | if (supplied_md5_b64) { | |
3350 | need_calc_md5 = true; | |
3351 | ||
3352 | ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; | |
3353 | op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], | |
3354 | supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64)); | |
3355 | ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl; | |
3356 | if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) { | |
3357 | op_ret = -ERR_INVALID_DIGEST; | |
3358 | goto done; | |
3359 | } | |
3360 | ||
3361 | buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5); | |
3362 | ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl; | |
3363 | } | |
3364 | ||
3365 | if (!chunked_upload) { /* with chunked upload we don't know how big is the upload. | |
3366 | we also check sizes at the end anyway */ | |
3367 | op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket, | |
3368 | user_quota, bucket_quota, s->content_length); | |
3369 | if (op_ret < 0) { | |
3370 | ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl; | |
3371 | goto done; | |
3372 | } | |
3373 | op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); | |
3374 | if (op_ret < 0) { | |
3375 | ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl; | |
3376 | goto done; | |
3377 | } | |
3378 | } | |
3379 | ||
3380 | if (supplied_etag) { | |
3381 | strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1); | |
3382 | supplied_md5[sizeof(supplied_md5) - 1] = '\0'; | |
3383 | } | |
3384 | ||
3385 | processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart); | |
3386 | ||
3387 | // no filters by default | |
3388 | filter = processor; | |
3389 | ||
3390 | /* Handle object versioning of Swift API. */ | |
3391 | if (! multipart) { | |
3392 | rgw_obj obj(s->bucket, s->object); | |
3393 | op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx), | |
3394 | s->bucket_owner.get_id(), | |
3395 | s->bucket_info, | |
3396 | obj); | |
3397 | if (op_ret < 0) { | |
3398 | goto done; | |
3399 | } | |
3400 | } | |
3401 | ||
3402 | op_ret = processor->prepare(store, NULL); | |
3403 | if (op_ret < 0) { | |
3404 | ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret | |
3405 | << dendl; | |
3406 | goto done; | |
3407 | } | |
3408 | ||
3409 | fst = copy_source_range_fst; | |
3410 | lst = copy_source_range_lst; | |
3411 | ||
3412 | op_ret = get_encrypt_filter(&encrypt, filter); | |
3413 | if (op_ret < 0) { | |
3414 | goto done; | |
3415 | } | |
3416 | if (encrypt != nullptr) { | |
3417 | filter = encrypt.get(); | |
3418 | } else { | |
3419 | //no encryption, we can try compression | |
3420 | if (compression_type != "none") { | |
3421 | plugin = get_compressor_plugin(s, compression_type); | |
3422 | if (!plugin) { | |
3423 | ldout(s->cct, 1) << "Cannot load plugin for compression type " | |
3424 | << compression_type << dendl; | |
3425 | } else { | |
3426 | compressor.emplace(s->cct, plugin, filter); | |
3427 | filter = &*compressor; | |
3428 | } | |
3429 | } | |
3430 | } | |
3431 | ||
3432 | do { | |
3433 | bufferlist data; | |
3434 | if (fst > lst) | |
3435 | break; | |
3436 | if (!copy_source) { | |
3437 | len = get_data(data); | |
3438 | } else { | |
3439 | uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst); | |
3440 | op_ret = get_data(fst, cur_lst, data); | |
3441 | if (op_ret < 0) | |
3442 | goto done; | |
3443 | len = data.length(); | |
3444 | s->content_length += len; | |
3445 | fst += len; | |
3446 | } | |
3447 | if (len < 0) { | |
3448 | op_ret = len; | |
3449 | goto done; | |
3450 | } | |
3451 | ||
3452 | if (need_calc_md5) { | |
3453 | hash.Update((const byte *)data.c_str(), data.length()); | |
3454 | } | |
3455 | ||
3456 | /* update torrrent */ | |
3457 | torrent.update(data); | |
3458 | ||
3459 | /* do we need this operation to be synchronous? if we're dealing with an object with immutable | |
3460 | * head, e.g., multipart object we need to make sure we're the first one writing to this object | |
3461 | */ | |
3462 | bool need_to_wait = (ofs == 0) && multipart; | |
3463 | ||
3464 | bufferlist orig_data; | |
3465 | ||
3466 | if (need_to_wait) { | |
3467 | orig_data = data; | |
3468 | } | |
3469 | ||
3470 | op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait); | |
3471 | if (op_ret < 0) { | |
3472 | if (!need_to_wait || op_ret != -EEXIST) { | |
3473 | ldout(s->cct, 20) << "processor->thottle_data() returned ret=" | |
3474 | << op_ret << dendl; | |
3475 | goto done; | |
3476 | } | |
3477 | /* need_to_wait == true and op_ret == -EEXIST */ | |
3478 | ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl; | |
3479 | ||
3480 | /* restore original data */ | |
3481 | data.swap(orig_data); | |
3482 | ||
3483 | /* restart processing with different oid suffix */ | |
3484 | ||
3485 | dispose_processor(processor); | |
3486 | processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart); | |
3487 | filter = processor; | |
3488 | ||
3489 | string oid_rand; | |
3490 | char buf[33]; | |
3491 | gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1); | |
3492 | oid_rand.append(buf); | |
3493 | ||
3494 | op_ret = processor->prepare(store, &oid_rand); | |
3495 | if (op_ret < 0) { | |
3496 | ldout(s->cct, 0) << "ERROR: processor->prepare() returned " | |
3497 | << op_ret << dendl; | |
3498 | goto done; | |
3499 | } | |
3500 | ||
3501 | op_ret = get_encrypt_filter(&encrypt, filter); | |
3502 | if (op_ret < 0) { | |
3503 | goto done; | |
3504 | } | |
3505 | if (encrypt != nullptr) { | |
3506 | filter = encrypt.get(); | |
3507 | } else { | |
3508 | if (compressor) { | |
3509 | compressor.emplace(s->cct, plugin, filter); | |
3510 | filter = &*compressor; | |
3511 | } | |
3512 | } | |
3513 | op_ret = put_data_and_throttle(filter, data, ofs, false); | |
3514 | if (op_ret < 0) { | |
3515 | goto done; | |
3516 | } | |
3517 | } | |
3518 | ||
3519 | ofs += len; | |
3520 | } while (len > 0); | |
3521 | ||
3522 | { | |
3523 | bufferlist flush; | |
3524 | op_ret = put_data_and_throttle(filter, flush, ofs, false); | |
3525 | if (op_ret < 0) { | |
3526 | goto done; | |
3527 | } | |
3528 | } | |
3529 | ||
3530 | if (!chunked_upload && ofs != s->content_length) { | |
3531 | op_ret = -ERR_REQUEST_TIMEOUT; | |
3532 | goto done; | |
3533 | } | |
3534 | s->obj_size = ofs; | |
3535 | ||
3536 | perfcounter->inc(l_rgw_put_b, s->obj_size); | |
3537 | ||
3538 | op_ret = do_aws4_auth_completion(); | |
3539 | if (op_ret < 0) { | |
3540 | goto done; | |
3541 | } | |
3542 | ||
3543 | op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket, | |
3544 | user_quota, bucket_quota, s->obj_size); | |
3545 | if (op_ret < 0) { | |
3546 | ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl; | |
3547 | goto done; | |
3548 | } | |
3549 | ||
3550 | op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); | |
3551 | if (op_ret < 0) { | |
3552 | ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl; | |
3553 | goto done; | |
3554 | } | |
3555 | ||
3556 | hash.Final(m); | |
3557 | ||
3558 | if (compressor && compressor->is_compressed()) { | |
3559 | bufferlist tmp; | |
3560 | RGWCompressionInfo cs_info; | |
3561 | cs_info.compression_type = plugin->get_type_name(); | |
3562 | cs_info.orig_size = s->obj_size; | |
3563 | cs_info.blocks = move(compressor->get_compression_blocks()); | |
3564 | ::encode(cs_info, tmp); | |
3565 | attrs[RGW_ATTR_COMPRESSION] = tmp; | |
3566 | ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION | |
3567 | << " with type=" << cs_info.compression_type | |
3568 | << ", orig_size=" << cs_info.orig_size | |
3569 | << ", blocks=" << cs_info.blocks.size() << dendl; | |
3570 | } | |
3571 | ||
3572 | buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5); | |
3573 | ||
3574 | etag = calc_md5; | |
3575 | ||
3576 | if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) { | |
3577 | op_ret = -ERR_BAD_DIGEST; | |
3578 | goto done; | |
3579 | } | |
3580 | ||
3581 | policy.encode(aclbl); | |
3582 | emplace_attr(RGW_ATTR_ACL, std::move(aclbl)); | |
3583 | ||
3584 | if (dlo_manifest) { | |
3585 | op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs); | |
3586 | if (op_ret < 0) { | |
3587 | ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl; | |
3588 | goto done; | |
3589 | } | |
3590 | complete_etag(hash, &etag); | |
3591 | ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl; | |
3592 | } | |
3593 | ||
3594 | if (slo_info) { | |
3595 | bufferlist manifest_bl; | |
3596 | ::encode(*slo_info, manifest_bl); | |
3597 | emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl)); | |
3598 | ||
3599 | hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len); | |
3600 | complete_etag(hash, &etag); | |
3601 | ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl; | |
3602 | } | |
3603 | ||
3604 | if (supplied_etag && etag.compare(supplied_etag) != 0) { | |
3605 | op_ret = -ERR_UNPROCESSABLE_ENTITY; | |
3606 | goto done; | |
3607 | } | |
3608 | bl.append(etag.c_str(), etag.size() + 1); | |
3609 | emplace_attr(RGW_ATTR_ETAG, std::move(bl)); | |
3610 | ||
3611 | populate_with_generic_attrs(s, attrs); | |
3612 | op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); | |
3613 | if (op_ret < 0) { | |
3614 | goto done; | |
3615 | } | |
3616 | encode_delete_at_attr(delete_at, attrs); | |
3617 | encode_obj_tags_attr(obj_tags.get(), attrs); | |
3618 | ||
3619 | /* Add a custom metadata to expose the information whether an object | |
3620 | * is an SLO or not. Appending the attribute must be performed AFTER | |
3621 | * processing any input from user in order to prohibit overwriting. */ | |
3622 | if (slo_info) { | |
3623 | bufferlist slo_userindicator_bl; | |
3624 | slo_userindicator_bl.append("True", 4); | |
3625 | emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl)); | |
3626 | } | |
3627 | ||
3628 | op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs, | |
3629 | (delete_at ? *delete_at : real_time()), if_match, if_nomatch, | |
3630 | (user_data.empty() ? nullptr : &user_data)); | |
3631 | ||
3632 | /* produce torrent */ | |
3633 | if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len())) | |
3634 | { | |
3635 | torrent.init(s, store); | |
3636 | torrent.set_create_date(mtime); | |
3637 | op_ret = torrent.complete(); | |
3638 | if (0 != op_ret) | |
3639 | { | |
3640 | ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl; | |
3641 | goto done; | |
3642 | } | |
3643 | } | |
3644 | ||
3645 | done: | |
3646 | dispose_processor(processor); | |
3647 | perfcounter->tinc(l_rgw_put_lat, | |
3648 | (ceph_clock_now() - s->time)); | |
3649 | } | |
3650 | ||
/* Intentionally a no-op: for POST (form-based) uploads the authorization
 * check needs the parsed form fields, so the IAM policy / bucket ACL
 * evaluation is deferred to execute(), right after get_params() and
 * verify_params(). */
int RGWPostObj::verify_permission()
{
  return 0;
}
3655 | /* | |
3656 | RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx) | |
3657 | { | |
3658 | RGWPutObjProcessor *processor; | |
3659 | ||
3660 | uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size; | |
3661 | ||
3662 | processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled()); | |
3663 | ||
3664 | return processor; | |
3665 | } | |
3666 | ||
3667 | void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor) | |
3668 | { | |
3669 | delete processor; | |
3670 | } | |
3671 | */ | |
/* Common pre-execution hook for bucket/object ops; all work is delegated
 * to rgw_bucket_object_pre_exec(). */
void RGWPostObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3676 | ||
3677 | void RGWPostObj::execute() | |
3678 | { | |
3679 | RGWPutObjDataProcessor *filter = nullptr; | |
3680 | boost::optional<RGWPutObj_Compress> compressor; | |
3681 | CompressorRef plugin; | |
3682 | char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; | |
3683 | ||
3684 | /* Read in the data from the POST form. */ | |
3685 | op_ret = get_params(); | |
3686 | if (op_ret < 0) { | |
3687 | return; | |
3688 | } | |
3689 | ||
3690 | op_ret = verify_params(); | |
3691 | if (op_ret < 0) { | |
3692 | return; | |
3693 | } | |
3694 | ||
3695 | if (s->iam_policy) { | |
3696 | auto e = s->iam_policy->eval(s->env, *s->auth.identity, | |
3697 | rgw::IAM::s3PutObject, | |
3698 | rgw_obj(s->bucket, s->object)); | |
3699 | if (e == Effect::Deny) { | |
3700 | op_ret = -EACCES; | |
3701 | return; | |
3702 | } else if (e == Effect::Pass && !verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) { | |
3703 | op_ret = -EACCES; | |
3704 | return; | |
3705 | } | |
3706 | } else if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) { | |
3707 | op_ret = -EACCES; | |
3708 | return; | |
3709 | } | |
3710 | ||
3711 | /* Start iteration over data fields. It's necessary as Swift's FormPost | |
3712 | * is capable to handle multiple files in single form. */ | |
3713 | do { | |
3714 | std::unique_ptr<RGWPutObjDataProcessor> encrypt; | |
3715 | char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; | |
3716 | unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE]; | |
3717 | MD5 hash; | |
3718 | ceph::buffer::list bl, aclbl; | |
3719 | int len = 0; | |
3720 | ||
3721 | op_ret = store->check_quota(s->bucket_owner.get_id(), | |
3722 | s->bucket, | |
3723 | user_quota, | |
3724 | bucket_quota, | |
3725 | s->content_length); | |
3726 | if (op_ret < 0) { | |
3727 | return; | |
3728 | } | |
3729 | ||
3730 | op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); | |
3731 | if (op_ret < 0) { | |
3732 | return; | |
3733 | } | |
3734 | ||
3735 | if (supplied_md5_b64) { | |
3736 | char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1]; | |
3737 | ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; | |
3738 | op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], | |
3739 | supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64)); | |
3740 | ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl; | |
3741 | if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) { | |
3742 | op_ret = -ERR_INVALID_DIGEST; | |
3743 | return; | |
3744 | } | |
3745 | ||
3746 | buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5); | |
3747 | ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl; | |
3748 | } | |
3749 | ||
3750 | RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), | |
3751 | s->bucket_info, | |
3752 | s->bucket, | |
3753 | get_current_filename(), | |
3754 | /* part size */ | |
3755 | s->cct->_conf->rgw_obj_stripe_size, | |
3756 | s->req_id, | |
3757 | s->bucket_info.versioning_enabled()); | |
3758 | /* No filters by default. */ | |
3759 | filter = &processor; | |
3760 | ||
3761 | op_ret = processor.prepare(store, nullptr); | |
3762 | if (op_ret < 0) { | |
3763 | return; | |
3764 | } | |
3765 | ||
3766 | op_ret = get_encrypt_filter(&encrypt, filter); | |
3767 | if (op_ret < 0) { | |
3768 | return; | |
3769 | } | |
3770 | if (encrypt != nullptr) { | |
3771 | filter = encrypt.get(); | |
3772 | } else { | |
3773 | const auto& compression_type = store->get_zone_params().get_compression_type( | |
3774 | s->bucket_info.placement_rule); | |
3775 | if (compression_type != "none") { | |
3776 | plugin = Compressor::create(s->cct, compression_type); | |
3777 | if (!plugin) { | |
3778 | ldout(s->cct, 1) << "Cannot load plugin for compression type " | |
3779 | << compression_type << dendl; | |
3780 | } else { | |
3781 | compressor.emplace(s->cct, plugin, filter); | |
3782 | filter = &*compressor; | |
3783 | } | |
3784 | } | |
3785 | } | |
3786 | ||
3787 | bool again; | |
3788 | do { | |
3789 | ceph::bufferlist data; | |
3790 | len = get_data(data, again); | |
3791 | ||
3792 | if (len < 0) { | |
3793 | op_ret = len; | |
3794 | return; | |
3795 | } | |
3796 | ||
3797 | if (!len) { | |
3798 | break; | |
3799 | } | |
3800 | ||
3801 | hash.Update((const byte *)data.c_str(), data.length()); | |
3802 | op_ret = put_data_and_throttle(filter, data, ofs, false); | |
3803 | ||
3804 | ofs += len; | |
3805 | ||
3806 | if (ofs > max_len) { | |
3807 | op_ret = -ERR_TOO_LARGE; | |
3808 | return; | |
3809 | } | |
3810 | } while (again); | |
3811 | ||
3812 | { | |
3813 | bufferlist flush; | |
3814 | op_ret = put_data_and_throttle(filter, flush, ofs, false); | |
3815 | } | |
3816 | ||
3817 | if (len < min_len) { | |
3818 | op_ret = -ERR_TOO_SMALL; | |
3819 | return; | |
3820 | } | |
3821 | ||
3822 | s->obj_size = ofs; | |
3823 | ||
3824 | if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) { | |
3825 | op_ret = -ERR_BAD_DIGEST; | |
3826 | return; | |
3827 | } | |
3828 | ||
3829 | op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket, | |
3830 | user_quota, bucket_quota, s->obj_size); | |
3831 | if (op_ret < 0) { | |
3832 | return; | |
3833 | } | |
3834 | ||
3835 | op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); | |
3836 | if (op_ret < 0) { | |
3837 | return; | |
3838 | } | |
3839 | ||
3840 | hash.Final(m); | |
3841 | buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5); | |
3842 | ||
3843 | etag = calc_md5; | |
3844 | bl.append(etag.c_str(), etag.size() + 1); | |
3845 | emplace_attr(RGW_ATTR_ETAG, std::move(bl)); | |
3846 | ||
3847 | policy.encode(aclbl); | |
3848 | emplace_attr(RGW_ATTR_ACL, std::move(aclbl)); | |
3849 | ||
3850 | const std::string content_type = get_current_content_type(); | |
3851 | if (! content_type.empty()) { | |
3852 | ceph::bufferlist ct_bl; | |
3853 | ct_bl.append(content_type.c_str(), content_type.size() + 1); | |
3854 | emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl)); | |
3855 | } | |
3856 | ||
3857 | if (compressor && compressor->is_compressed()) { | |
3858 | ceph::bufferlist tmp; | |
3859 | RGWCompressionInfo cs_info; | |
3860 | cs_info.compression_type = plugin->get_type_name(); | |
3861 | cs_info.orig_size = s->obj_size; | |
3862 | cs_info.blocks = move(compressor->get_compression_blocks()); | |
3863 | ::encode(cs_info, tmp); | |
3864 | emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp)); | |
3865 | } | |
3866 | ||
3867 | op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(), | |
3868 | attrs, (delete_at ? *delete_at : real_time())); | |
3869 | } while (is_next_file_to_upload()); | |
3870 | } | |
3871 | ||
3872 | ||
3873 | void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs, | |
3874 | const set<string>& rmattr_names, | |
3875 | map<int, string>& temp_url_keys) | |
3876 | { | |
3877 | map<string, bufferlist>::iterator iter; | |
3878 | ||
3879 | iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1); | |
3880 | if (iter != add_attrs.end()) { | |
3881 | temp_url_keys[0] = iter->second.c_str(); | |
3882 | add_attrs.erase(iter); | |
3883 | } | |
3884 | ||
3885 | iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2); | |
3886 | if (iter != add_attrs.end()) { | |
3887 | temp_url_keys[1] = iter->second.c_str(); | |
3888 | add_attrs.erase(iter); | |
3889 | } | |
3890 | ||
3891 | for (const string& name : rmattr_names) { | |
3892 | if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) { | |
3893 | temp_url_keys[0] = string(); | |
3894 | } | |
3895 | if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) { | |
3896 | temp_url_keys[1] = string(); | |
3897 | } | |
3898 | } | |
3899 | } | |
3900 | ||
/* Gather and pre-process everything needed to update the account's
 * metadata: request params, the user's current xattrs, the merged
 * attribute set, plus the TempURL keys and quota settings that
 * verify_permission() must inspect before execute() runs.
 * Returns 0 on success or a negative error code. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* Fetch the user's existing xattrs so additions can be merged against
   * them below. */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  /* Encode the new ACL, if one was supplied with the request. */
  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return op_ret;
  }
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
3949 | ||
3950 | int RGWPutMetadataAccount::verify_permission() | |
3951 | { | |
3952 | if (s->auth.identity->is_anonymous()) { | |
3953 | return -EACCES; | |
3954 | } | |
3955 | ||
3956 | if (!verify_user_permission(s, RGW_PERM_WRITE)) { | |
3957 | return -EACCES; | |
3958 | } | |
3959 | ||
3960 | /* Altering TempURL keys requires FULL_CONTROL. */ | |
3961 | if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) { | |
3962 | return -EPERM; | |
3963 | } | |
3964 | ||
3965 | /* We are failing this intensionally to allow system user/reseller admin | |
3966 | * override in rgw_process.cc. This is the way to specify a given RGWOp | |
3967 | * expect extra privileges. */ | |
3968 | if (new_quota_extracted) { | |
3969 | return -EACCES; | |
3970 | } | |
3971 | ||
3972 | return 0; | |
3973 | } | |
3974 | ||
/* Apply the account-metadata update prepared by init_processing():
 * re-reads the current user record, folds in the TempURL keys and quota
 * extracted earlier, then stores the updated record together with the
 * merged xattrs. */
void RGWPutMetadataAccount::execute()
{
  /* Params have been extracted earlier. See init_processing(). */
  RGWUserInfo new_uinfo;
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
4002 | ||
4003 | int RGWPutMetadataBucket::verify_permission() | |
4004 | { | |
4005 | if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) { | |
4006 | return -EACCES; | |
4007 | } | |
4008 | ||
4009 | return 0; | |
4010 | } | |
4011 | ||
/* Common pre-execution hook for bucket/object ops; all work is delegated
 * to rgw_bucket_object_pre_exec(). */
void RGWPutMetadataBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4016 | ||
/* Update a bucket's metadata (Swift's POST container): ACL, CORS,
 * generic/user attributes, container quota, versioning location and
 * static-website config. The actual mutation runs inside
 * retry_raced_bucket_write() so it is retried if another writer races
 * on the bucket info. */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return;
  }

  /* The placement rule is fixed at bucket creation; reject any attempt
   * to change it on an existing bucket. */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  op_ret = retry_raced_bucket_write(store, s, [this] {
      /* Encode special metadata first as we're using std::map::emplace under
       * the hood. This method will add the new items only if the map doesn't
       * contain such keys yet. */
      if (has_policy) {
	if (s->dialect.compare("swift") == 0) {
	  /* Swift merges the incoming ACL with the existing one instead of
	   * replacing it wholesale; policy_rw_mask selects which grants
	   * (read/write) the request actually supplied. */
	  auto old_policy = \
	    static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
	  auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
	  new_policy->filter_merge(policy_rw_mask, old_policy);
	  policy = *new_policy;
	}
	buffer::list bl;
	policy.encode(bl);
	emplace_attr(RGW_ATTR_ACL, std::move(bl));
      }

      if (has_cors) {
	buffer::list bl;
	cors_config.encode(bl);
	emplace_attr(RGW_ATTR_CORS, std::move(bl));
      }

      /* It's supposed that following functions WILL NOT change any
       * special attributes (like RGW_ATTR_ACL) if they are already
       * present in attrs. */
      prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
      populate_with_generic_attrs(s, attrs);

      /* According to the Swift's behaviour and its container_quota
       * WSGI middleware implementation: anyone with write permissions
       * is able to set the bucket quota. This stays in contrast to
       * account quotas that can be set only by clients holding
       * reseller admin privileges. */
      op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
      if (op_ret < 0) {
	return op_ret;
      }

      if (swift_ver_location) {
	s->bucket_info.swift_ver_location = *swift_ver_location;
	s->bucket_info.swift_versioning = (!swift_ver_location->empty());
      }

      /* Web site of Swift API. */
      filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
      s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

      /* Setting attributes also stores the provided bucket info. Due
       * to this fact, the new quota settings can be serialized with
       * the same call. */
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
				    &s->bucket_info.objv_tracker);
      return op_ret;
    });
}
4091 | ||
4092 | int RGWPutMetadataObject::verify_permission() | |
4093 | { | |
4094 | // This looks to be something specific to Swift. We could add | |
4095 | // operations like swift:PutMetadataObject to the Policy Engine. | |
4096 | if (!verify_object_permission_no_policy(s, RGW_PERM_WRITE)) { | |
4097 | return -EACCES; | |
4098 | } | |
4099 | ||
4100 | return 0; | |
4101 | } | |
4102 | ||
/* Common pre-execution hook for bucket/object ops; all work is delegated
 * to rgw_bucket_object_pre_exec(). */
void RGWPutMetadataObject::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4107 | ||
/* Replace an object's metadata (Swift's POST object): reads the current
 * xattrs, merges in the request metadata, generic attrs, delete-at and
 * DLO manifest, then writes the result back with set_attrs(). */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  /* NOTE(review): set_atomic presumably guards against racing writers on
   * this object — confirm against RGWRados docs. */
  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  /* Re-encode the DLO manifest header, if the client supplied one. */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4153 | ||
4154 | int RGWDeleteObj::handle_slo_manifest(bufferlist& bl) | |
4155 | { | |
4156 | RGWSLOInfo slo_info; | |
4157 | bufferlist::iterator bliter = bl.begin(); | |
4158 | try { | |
4159 | ::decode(slo_info, bliter); | |
4160 | } catch (buffer::error& err) { | |
4161 | ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl; | |
4162 | return -EIO; | |
4163 | } | |
4164 | ||
4165 | try { | |
4166 | deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\ | |
4167 | new RGWBulkDelete::Deleter(store, s)); | |
4168 | } catch (std::bad_alloc) { | |
4169 | return -ENOMEM; | |
4170 | } | |
4171 | ||
4172 | list<RGWBulkDelete::acct_path_t> items; | |
4173 | for (const auto& iter : slo_info.entries) { | |
4174 | const string& path_str = iter.path; | |
4175 | ||
4176 | const size_t sep_pos = path_str.find('/', 1 /* skip first slash */); | |
4177 | if (boost::string_view::npos == sep_pos) { | |
4178 | return -EINVAL; | |
4179 | } | |
4180 | ||
4181 | RGWBulkDelete::acct_path_t path; | |
4182 | ||
4183 | path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1)); | |
4184 | path.obj_key = url_decode(path_str.substr(sep_pos + 1)); | |
4185 | ||
4186 | items.push_back(path); | |
4187 | } | |
4188 | ||
4189 | /* Request removal of the manifest object itself. */ | |
4190 | RGWBulkDelete::acct_path_t path; | |
4191 | path.bucket_name = s->bucket_name; | |
4192 | path.obj_key = s->object; | |
4193 | items.push_back(path); | |
4194 | ||
4195 | int ret = deleter->delete_chunk(items); | |
4196 | if (ret < 0) { | |
4197 | return ret; | |
4198 | } | |
4199 | ||
4200 | return 0; | |
4201 | } | |
4202 | ||
4203 | int RGWDeleteObj::verify_permission() | |
4204 | { | |
4205 | if (s->iam_policy) { | |
4206 | auto r = s->iam_policy->eval(s->env, *s->auth.identity, | |
4207 | s->object.instance.empty() ? | |
4208 | rgw::IAM::s3DeleteObject : | |
4209 | rgw::IAM::s3DeleteObjectVersion, | |
4210 | ARN(s->bucket, s->object.name)); | |
4211 | if (r == Effect::Allow) | |
4212 | return true; | |
4213 | else if (r == Effect::Deny) | |
4214 | return false; | |
4215 | } | |
4216 | ||
4217 | if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) { | |
4218 | return -EACCES; | |
4219 | } | |
4220 | ||
4221 | return 0; | |
4222 | } | |
4223 | ||
/* Common pre-execution hook for bucket/object ops; all work is delegated
 * to rgw_bucket_object_pre_exec(). */
void RGWDeleteObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4228 | ||
/* Delete a single object. Handles three sub-cases: Swift SLO bulk
 * deletion (?multipart-manifest=delete), Swift object versioning
 * (restore the previous version instead of deleting), and the regular
 * delete path via RGWRados::Object::Delete. */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    /* Swift SLO: delete every segment listed in the manifest, then the
     * manifest itself, via handle_slo_manifest(). */
    if (multipart_delete) {
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->obj.set_atomic(obj);

    /* Swift object versioning: try to restore the newest archived copy
     * in place of the object being deleted. */
    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    /* Callers can opt out of surfacing precondition failures. */
    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4319 | ||
4320 | ||
4321 | bool RGWCopyObj::parse_copy_location(const string& url_src, string& bucket_name, rgw_obj_key& key) | |
4322 | { | |
4323 | string name_str; | |
4324 | string params_str; | |
4325 | ||
4326 | size_t pos = url_src.find('?'); | |
4327 | if (pos == string::npos) { | |
4328 | name_str = url_src; | |
4329 | } else { | |
4330 | name_str = url_src.substr(0, pos); | |
4331 | params_str = url_src.substr(pos + 1); | |
4332 | } | |
4333 | ||
4334 | std::string dec_src = url_decode(name_str); | |
4335 | const char *src = dec_src.c_str(); | |
4336 | ||
4337 | if (*src == '/') ++src; | |
4338 | ||
4339 | string str(src); | |
4340 | ||
4341 | pos = str.find('/'); | |
4342 | if (pos ==string::npos) | |
4343 | return false; | |
4344 | ||
4345 | bucket_name = str.substr(0, pos); | |
4346 | key.name = str.substr(pos + 1); | |
4347 | ||
4348 | if (key.name.empty()) { | |
4349 | return false; | |
4350 | } | |
4351 | ||
4352 | if (!params_str.empty()) { | |
4353 | RGWHTTPArgs args; | |
4354 | args.set(params_str); | |
4355 | args.parse(); | |
4356 | ||
4357 | key.instance = args.get("versionId", NULL); | |
4358 | } | |
4359 | ||
4360 | return true; | |
4361 | } | |
4362 | ||
/* Authorize a server-side copy: the caller needs READ on the source
 * object (policy and/or ACL) and WRITE on the destination bucket.
 * Also resolves both bucket infos and marks the source for atomic,
 * prefetched reads. Returns 0 or a negative error code. */
int RGWCopyObj::verify_permission()
{
  RGWAccessControlPolicy src_acl(s->cct);
  optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl,
			     src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
	/* Policy first: explicit Deny wins, Pass falls back to the ACL. */
	auto e = src_policy->eval(s->env, *s->auth.identity,
				  src_object.instance.empty() ?
				  rgw::IAM::s3GetObject :
				  rgw::IAM::s3GetObjectVersion,
				  ARN(src_obj));
	if (e == Effect::Deny) {
	  return -EACCES;
	} else if (e == Effect::Pass &&
		   !src_acl.verify_permission(*s->auth.identity, s->perm_mask,
					      RGW_PERM_READ)) {
	  return -EACCES;
	}
      } else if (!src_acl.verify_permission(*s->auth.identity,
					       s->perm_mask,
					    RGW_PERM_READ)) {
	return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }

  /* admin request overrides permission checks */
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
      ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4474 | ||
4475 | ||
4476 | int RGWCopyObj::init_common() | |
4477 | { | |
4478 | if (if_mod) { | |
4479 | if (parse_time(if_mod, &mod_time) < 0) { | |
4480 | op_ret = -EINVAL; | |
4481 | return op_ret; | |
4482 | } | |
4483 | mod_ptr = &mod_time; | |
4484 | } | |
4485 | ||
4486 | if (if_unmod) { | |
4487 | if (parse_time(if_unmod, &unmod_time) < 0) { | |
4488 | op_ret = -EINVAL; | |
4489 | return op_ret; | |
4490 | } | |
4491 | unmod_ptr = &unmod_time; | |
4492 | } | |
4493 | ||
4494 | bufferlist aclbl; | |
4495 | dest_policy.encode(aclbl); | |
4496 | emplace_attr(RGW_ATTR_ACL, std::move(aclbl)); | |
4497 | ||
4498 | op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); | |
4499 | if (op_ret < 0) { | |
4500 | return op_ret; | |
4501 | } | |
4502 | populate_with_generic_attrs(s, attrs); | |
4503 | ||
4504 | return 0; | |
4505 | } | |
4506 | ||
4507 | static void copy_obj_progress_cb(off_t ofs, void *param) | |
4508 | { | |
4509 | RGWCopyObj *op = static_cast<RGWCopyObj *>(param); | |
4510 | op->progress_cb(ofs); | |
4511 | } | |
4512 | ||
4513 | void RGWCopyObj::progress_cb(off_t ofs) | |
4514 | { | |
4515 | if (!s->cct->_conf->rgw_copy_obj_progress) | |
4516 | return; | |
4517 | ||
4518 | if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes) | |
4519 | return; | |
4520 | ||
4521 | send_partial_response(ofs); | |
4522 | ||
4523 | last_ofs = ofs; | |
4524 | } | |
4525 | ||
void RGWCopyObj::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4530 | ||
void RGWCopyObj::execute()
{
  /* Server-side copy: read the source object and write the destination,
   * honoring the conditional-copy timestamps parsed by init_common().
   * op_ret carries the result for the response phase. */
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  /* Mark both ends atomic so concurrent modification is handled. */
  obj_ctx.obj.set_atomic(src_obj);
  obj_ctx.obj.set_atomic(dst_obj);

  encode_delete_at_attr(delete_at, attrs);

  /* High-precision time comparison is enabled only for system requests. */
  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  /* The actual copy; argument order must match RGWRados::copy_obj(). */
  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           client_id,
                           op_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGW_OBJ_CATEGORY_MAIN,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
                           );
}
4585 | ||
4586 | int RGWGetACLs::verify_permission() | |
4587 | { | |
4588 | bool perm; | |
4589 | if (!s->object.empty()) { | |
4590 | perm = verify_object_permission(s, | |
4591 | s->object.instance.empty() ? | |
4592 | rgw::IAM::s3GetObjectAcl : | |
4593 | rgw::IAM::s3GetObjectVersionAcl); | |
4594 | } else { | |
4595 | perm = verify_bucket_permission(s, rgw::IAM::s3GetBucketAcl); | |
4596 | } | |
4597 | if (!perm) | |
4598 | return -EACCES; | |
4599 | ||
4600 | return 0; | |
4601 | } | |
4602 | ||
void RGWGetACLs::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4607 | ||
4608 | void RGWGetACLs::execute() | |
4609 | { | |
4610 | stringstream ss; | |
4611 | RGWAccessControlPolicy* const acl = \ | |
4612 | (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get()); | |
4613 | RGWAccessControlPolicy_S3* const s3policy = \ | |
4614 | static_cast<RGWAccessControlPolicy_S3*>(acl); | |
4615 | s3policy->to_xml(ss); | |
4616 | acls = ss.str(); | |
4617 | } | |
4618 | ||
4619 | ||
4620 | ||
4621 | int RGWPutACLs::verify_permission() | |
4622 | { | |
4623 | bool perm; | |
4624 | if (!s->object.empty()) { | |
4625 | perm = verify_object_permission(s, | |
4626 | s->object.instance.empty() ? | |
4627 | rgw::IAM::s3PutObjectAcl : | |
4628 | rgw::IAM::s3PutObjectVersionAcl); | |
4629 | } else { | |
4630 | perm = verify_bucket_permission(s, rgw::IAM::s3PutBucketAcl); | |
4631 | } | |
4632 | if (!perm) | |
4633 | return -EACCES; | |
4634 | ||
4635 | return 0; | |
4636 | } | |
4637 | ||
4638 | int RGWGetLC::verify_permission() | |
4639 | { | |
4640 | bool perm; | |
4641 | perm = verify_bucket_permission(s, rgw::IAM::s3GetLifecycleConfiguration); | |
4642 | if (!perm) | |
4643 | return -EACCES; | |
4644 | ||
4645 | return 0; | |
4646 | } | |
4647 | ||
4648 | int RGWPutLC::verify_permission() | |
4649 | { | |
4650 | bool perm; | |
4651 | perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration); | |
4652 | if (!perm) | |
4653 | return -EACCES; | |
4654 | ||
4655 | return 0; | |
4656 | } | |
4657 | ||
4658 | int RGWDeleteLC::verify_permission() | |
4659 | { | |
4660 | bool perm; | |
4661 | perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration); | |
4662 | if (!perm) | |
4663 | return -EACCES; | |
4664 | ||
4665 | return 0; | |
4666 | } | |
4667 | ||
void RGWPutACLs::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4672 | ||
void RGWGetLC::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4677 | ||
void RGWPutLC::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4682 | ||
void RGWDeleteLC::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4687 | ||
void RGWPutACLs::execute()
{
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  char *new_data = NULL;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  /* A PUT ?acl never changes ownership: the owner is carried over from
   * the policy being replaced. */
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    if (op_ret == -ERANGE) {
      /* Request body exceeded rgw_max_put_param_size. */
      ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = "
                       << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  /* A canned ACL and an XML body are mutually exclusive. */
  if (!s->canned_acl.empty() && len) {
    op_ret = -EINVAL;
    return;
  }

  /* Canned ACLs and x-amz-grant-* headers are converted into an
   * equivalent XML document that replaces the request body. */
  if (!s->canned_acl.empty() || s->has_acl_header) {
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    new_data = strdup(ss.str().c_str());
    free(data);
    data = new_data;
    len = ss.str().size();
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  /* Reject ACLs with more grants than rgw_acl_grants_max_num allows
   * (a negative config value falls back to the built-in default). */
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM 100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldout(s->cct, 4) << "An acl can have up to "
                     << max_num
                     << " grants, request acl grants num: "
                     << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data, len);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  /* rebuild() validates each grantee and produces the policy that is
   * actually stored. */
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  /* Persist: object ACLs go to the object's xattr, bucket ACLs to the
   * bucket instance attrs. */
  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
4821 | ||
4822 | static void get_lc_oid(struct req_state *s, string& oid) | |
4823 | { | |
4824 | string shard_id = s->bucket.name + ':' +s->bucket.bucket_id; | |
4825 | int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs; | |
4826 | int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs; | |
4827 | oid = lc_oid_prefix; | |
4828 | char buf[32]; | |
4829 | snprintf(buf, 32, ".%d", index); | |
4830 | oid.append(buf); | |
4831 | return; | |
4832 | } | |
4833 | ||
void RGWPutLC::execute()
{
  bufferlist bl;

  RGWLifecycleConfiguration_S3 *config = NULL;
  RGWLCXMLParser_S3 parser(s->cct);
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  /* S3 mandates Content-MD5 on lifecycle PUTs; it is verified against
   * the request body below. */
  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
  if (content_md5 == nullptr) {
    op_ret = -ERR_INVALID_REQUEST;
    s->err.message = "Missing required header for this request: Content-MD5";
    ldout(s->cct, 5) << s->err.message << dendl;
    return;
  }

  std::string content_md5_bin;
  try {
    content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
  } catch (...) {
    s->err.message = "Request header Content-MD5 contains character "
                     "that is not base64 encoded.";
    ldout(s->cct, 5) << s->err.message << dendl;
    op_ret = -ERR_BAD_DIGEST;
    return;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  /* Verify the body digest against the client-supplied Content-MD5. */
  MD5 data_hash;
  unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
  data_hash.Update(reinterpret_cast<const byte*>(data), len);
  data_hash.Final(data_hash_res);

  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
    op_ret = -ERR_BAD_DIGEST;
    s->err.message = "The Content-MD5 you specified did not match what we received.";
    ldout(s->cct, 5) << s->err.message
                     << " Specified content md5: " << content_md5
                     << ", calculated content md5: " << data_hash_res
                     << dendl;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }
  config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
  if (!config) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old LifecycleConfiguration:";
    config->to_xml(*_dout);
    *_dout << dendl;
  }

  /* rebuild() validates the parsed rules and produces the configuration
   * that is actually stored. */
  op_ret = config->rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New LifecycleConfiguration:";
    new_config.to_xml(*_dout);
    *_dout << dendl;
  }

  /* Persist the config as a bucket instance attr, then register the
   * bucket in its lifecycle shard object so the LC daemon will visit it. */
  new_config.encode(bl);
  map<string, bufferlist> attrs;
  attrs = s->bucket_attrs;
  attrs[RGW_ATTR_LC] = bl;
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;
  /* NOTE(review): this shard_id includes the tenant, but get_lc_oid()
   * and RGWDeleteLC::execute() build theirs without it — confirm the
   * entry written here is the one deletion later removes. */
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  string oid;
  get_lc_oid(s, oid);
  pair<string, int> entry(shard_id, lc_uninitial);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  l.set_cookie(cookie);
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  /* Take the shard's cls lock before mutating the entry list; retry
   * while another worker holds it. */
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  }while(1);
  l.unlock(ctx, oid);
  return;
}
4950 | ||
4951 | void RGWDeleteLC::execute() | |
4952 | { | |
4953 | bufferlist bl; | |
4954 | map<string, bufferlist> orig_attrs, attrs; | |
4955 | map<string, bufferlist>::iterator iter; | |
4956 | rgw_raw_obj obj; | |
4957 | store->get_bucket_instance_obj(s->bucket, obj); | |
4958 | store->set_prefetch_data(s->obj_ctx, obj); | |
4959 | op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker); | |
4960 | if (op_ret < 0) | |
4961 | return; | |
4962 | ||
4963 | for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) { | |
4964 | const string& name = iter->first; | |
4965 | dout(10) << "DeleteLC : attr: " << name << dendl; | |
4966 | if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) { | |
4967 | if (attrs.find(name) == attrs.end()) { | |
4968 | attrs[name] = iter->second; | |
4969 | } | |
4970 | } | |
4971 | } | |
4972 | op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker); | |
4973 | string shard_id = s->bucket.name + ':' +s->bucket.bucket_id; | |
4974 | pair<string, int> entry(shard_id, lc_uninitial); | |
4975 | string oid; | |
4976 | get_lc_oid(s, oid); | |
4977 | int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time; | |
4978 | librados::IoCtx *ctx = store->get_lc_pool_ctx(); | |
4979 | rados::cls::lock::Lock l(lc_index_lock_name); | |
4980 | utime_t time(max_lock_secs, 0); | |
4981 | l.set_duration(time); | |
4982 | do { | |
4983 | op_ret = l.lock_exclusive(ctx, oid); | |
4984 | if (op_ret == -EBUSY) { | |
4985 | dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl; | |
4986 | sleep(5); | |
4987 | continue; | |
4988 | } | |
4989 | if (op_ret < 0) { | |
4990 | dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl; | |
4991 | break; | |
4992 | } | |
4993 | op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry); | |
4994 | if (op_ret < 0) { | |
4995 | dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl; | |
4996 | } | |
4997 | break; | |
4998 | }while(1); | |
4999 | l.unlock(ctx, oid); | |
5000 | return; | |
5001 | } | |
5002 | ||
int RGWGetCORS::verify_permission()
{
  // Allowed for the bucket owner or via an s3:GetBucketCORS policy grant.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
}
5007 | ||
5008 | void RGWGetCORS::execute() | |
5009 | { | |
5010 | op_ret = read_bucket_cors(); | |
5011 | if (op_ret < 0) | |
5012 | return ; | |
5013 | ||
5014 | if (!cors_exist) { | |
5015 | dout(2) << "No CORS configuration set yet for this bucket" << dendl; | |
5016 | op_ret = -ENOENT; | |
5017 | return; | |
5018 | } | |
5019 | } | |
5020 | ||
int RGWPutCORS::verify_permission()
{
  // Allowed for the bucket owner or via an s3:PutBucketCORS policy grant.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5025 | ||
5026 | void RGWPutCORS::execute() | |
5027 | { | |
5028 | rgw_raw_obj obj; | |
5029 | ||
5030 | op_ret = get_params(); | |
5031 | if (op_ret < 0) | |
5032 | return; | |
5033 | ||
5034 | if (!store->is_meta_master()) { | |
5035 | op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr); | |
5036 | if (op_ret < 0) { | |
5037 | ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; | |
5038 | return; | |
5039 | } | |
5040 | } | |
5041 | ||
5042 | op_ret = retry_raced_bucket_write(store, s, [this] { | |
5043 | map<string, bufferlist> attrs = s->bucket_attrs; | |
5044 | attrs[RGW_ATTR_CORS] = cors_bl; | |
5045 | return rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker); | |
5046 | }); | |
5047 | } | |
5048 | ||
int RGWDeleteCORS::verify_permission()
{
  // No separate delete permission
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5054 | ||
5055 | void RGWDeleteCORS::execute() | |
5056 | { | |
5057 | op_ret = read_bucket_cors(); | |
5058 | if (op_ret < 0) | |
5059 | return; | |
5060 | ||
5061 | bufferlist bl; | |
5062 | if (!cors_exist) { | |
5063 | dout(2) << "No CORS configuration set yet for this bucket" << dendl; | |
5064 | op_ret = -ENOENT; | |
5065 | return; | |
5066 | } | |
5067 | op_ret = retry_raced_bucket_write(store, s, [this] { | |
5068 | rgw_raw_obj obj; | |
5069 | store->get_bucket_instance_obj(s->bucket, obj); | |
5070 | store->set_prefetch_data(s->obj_ctx, obj); | |
5071 | map<string, bufferlist> orig_attrs, attrs, rmattrs; | |
5072 | map<string, bufferlist>::iterator iter; | |
5073 | ||
5074 | op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker); | |
5075 | if (op_ret < 0) | |
5076 | return op_ret; | |
5077 | ||
5078 | /* only remove meta attrs */ | |
5079 | for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) { | |
5080 | const string& name = iter->first; | |
5081 | dout(10) << "DeleteCORS : attr: " << name << dendl; | |
5082 | if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) { | |
5083 | rmattrs[name] = iter->second; | |
5084 | } else if (attrs.find(name) == attrs.end()) { | |
5085 | attrs[name] = iter->second; | |
5086 | } | |
5087 | } | |
5088 | return rgw_bucket_set_attrs(store, s->bucket_info, attrs, | |
5089 | &s->bucket_info.objv_tracker); | |
5090 | }); | |
5091 | } | |
5092 | ||
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  // Fill the CORS response headers from the rule matched in
  // validate_cors_request().
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
5096 | ||
5097 | int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) { | |
5098 | rule = cc->host_name_rule(origin); | |
5099 | if (!rule) { | |
5100 | dout(10) << "There is no cors rule present for " << origin << dendl; | |
5101 | return -ENOENT; | |
5102 | } | |
5103 | ||
5104 | if (!validate_cors_rule_method(rule, req_meth)) { | |
5105 | return -ENOENT; | |
5106 | } | |
5107 | ||
5108 | if (!validate_cors_rule_header(rule, req_hdrs)) { | |
5109 | return -ENOENT; | |
5110 | } | |
5111 | ||
5112 | return 0; | |
5113 | } | |
5114 | ||
5115 | void RGWOptionsCORS::execute() | |
5116 | { | |
5117 | op_ret = read_bucket_cors(); | |
5118 | if (op_ret < 0) | |
5119 | return; | |
5120 | ||
5121 | origin = s->info.env->get("HTTP_ORIGIN"); | |
5122 | if (!origin) { | |
5123 | dout(0) << | |
5124 | "Preflight request without mandatory Origin header" | |
5125 | << dendl; | |
5126 | op_ret = -EINVAL; | |
5127 | return; | |
5128 | } | |
5129 | req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD"); | |
5130 | if (!req_meth) { | |
5131 | dout(0) << | |
5132 | "Preflight request without mandatory Access-control-request-method header" | |
5133 | << dendl; | |
5134 | op_ret = -EINVAL; | |
5135 | return; | |
5136 | } | |
5137 | if (!cors_exist) { | |
5138 | dout(2) << "No CORS configuration set yet for this bucket" << dendl; | |
5139 | op_ret = -ENOENT; | |
5140 | return; | |
5141 | } | |
5142 | req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS"); | |
5143 | op_ret = validate_cors_request(&bucket_cors); | |
5144 | if (!rule) { | |
5145 | origin = req_meth = NULL; | |
5146 | return; | |
5147 | } | |
5148 | return; | |
5149 | } | |
5150 | ||
int RGWGetRequestPayment::verify_permission()
{
  // Allowed for the bucket owner or via s3:GetBucketRequestPayment.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
}
5155 | ||
void RGWGetRequestPayment::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5160 | ||
void RGWGetRequestPayment::execute()
{
  // Report the bucket's current requester-pays flag.
  requester_pays = s->bucket_info.requester_pays;
}
5165 | ||
int RGWSetRequestPayment::verify_permission()
{
  // Allowed for the bucket owner or via s3:PutBucketRequestPayment.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
}
5170 | ||
void RGWSetRequestPayment::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5175 | ||
5176 | void RGWSetRequestPayment::execute() | |
5177 | { | |
5178 | op_ret = get_params(); | |
5179 | ||
5180 | if (op_ret < 0) | |
5181 | return; | |
5182 | ||
5183 | s->bucket_info.requester_pays = requester_pays; | |
5184 | op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), | |
5185 | &s->bucket_attrs); | |
5186 | if (op_ret < 0) { | |
5187 | ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name | |
5188 | << " returned err=" << op_ret << dendl; | |
5189 | return; | |
5190 | } | |
5191 | } | |
5192 | ||
5193 | int RGWInitMultipart::verify_permission() | |
5194 | { | |
5195 | if (s->iam_policy) { | |
5196 | auto e = s->iam_policy->eval(s->env, *s->auth.identity, | |
5197 | rgw::IAM::s3PutObject, | |
5198 | rgw_obj(s->bucket, s->object)); | |
5199 | if (e == Effect::Allow) { | |
5200 | return 0; | |
5201 | } else if (e == Effect::Deny) { | |
5202 | return -EACCES; | |
5203 | } | |
5204 | } | |
5205 | ||
5206 | if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) { | |
5207 | return -EACCES; | |
5208 | } | |
5209 | ||
5210 | return 0; | |
5211 | } | |
5212 | ||
void RGWInitMultipart::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5217 | ||
void RGWInitMultipart::execute()
{
  bufferlist aclbl;
  map<string, bufferlist> attrs;
  rgw_obj obj;

  /* NOTE(review): op_ret is not set on get_params() failure here, unlike
   * the other ops — presumably get_params() reports its own error;
   * confirm against the REST subclasses. */
  if (get_params() < 0)
    return;

  if (s->object.empty())
    return;

  /* Store the requested ACL on the upload's meta object. */
  policy.encode(aclbl);
  attrs[RGW_ATTR_ACL] = aclbl;

  populate_with_generic_attrs(s, attrs);

  /* select encryption mode */
  op_ret = prepare_encryption(attrs);
  if (op_ret != 0)
    return;

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* Generate a random upload id and create the meta object exclusively;
   * on the (unlikely) event of an id collision, retry with a fresh id. */
  do {
    char buf[33];
    gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
    upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    string tmp_obj_name;
    RGWMPObj mp(s->object.name, upload_id);
    tmp_obj_name = mp.get_meta();

    obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
    // the meta object is indexed with 0 size; it is stored in the
    // extra-data part of the object (see set_in_extra_data() below)
    obj.set_in_extra_data(true);
    obj.index_hash_source = s->object.name;

    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
    op_target.set_versioning_disabled(true); /* no versioning for multipart meta */

    RGWRados::Object::Write obj_op(&op_target);

    obj_op.meta.owner = s->owner.get_id();
    obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
    obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;

    /* PUT_OBJ_CREATE_EXCL makes this fail with -EEXIST on collision. */
    op_ret = obj_op.write_meta(0, 0, attrs);
  } while (op_ret == -EEXIST);
}
5272 | ||
5273 | static int get_multipart_info(RGWRados *store, struct req_state *s, | |
5274 | string& meta_oid, | |
5275 | RGWAccessControlPolicy *policy, | |
5276 | map<string, bufferlist>& attrs) | |
5277 | { | |
5278 | map<string, bufferlist>::iterator iter; | |
5279 | bufferlist header; | |
5280 | ||
5281 | rgw_obj obj; | |
5282 | obj.init_ns(s->bucket, meta_oid, mp_ns); | |
5283 | obj.set_in_extra_data(true); | |
5284 | ||
5285 | int op_ret = get_obj_attrs(store, s, obj, attrs); | |
5286 | if (op_ret < 0) { | |
5287 | if (op_ret == -ENOENT) { | |
5288 | return -ERR_NO_SUCH_UPLOAD; | |
5289 | } | |
5290 | return op_ret; | |
5291 | } | |
5292 | ||
5293 | if (policy) { | |
5294 | for (iter = attrs.begin(); iter != attrs.end(); ++iter) { | |
5295 | string name = iter->first; | |
5296 | if (name.compare(RGW_ATTR_ACL) == 0) { | |
5297 | bufferlist& bl = iter->second; | |
5298 | bufferlist::iterator bli = bl.begin(); | |
5299 | try { | |
5300 | ::decode(*policy, bli); | |
5301 | } catch (buffer::error& err) { | |
5302 | ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; | |
5303 | return -EIO; | |
5304 | } | |
5305 | break; | |
5306 | } | |
5307 | } | |
5308 | } | |
5309 | ||
5310 | return 0; | |
5311 | } | |
5312 | ||
5313 | int RGWCompleteMultipart::verify_permission() | |
5314 | { | |
5315 | if (s->iam_policy) { | |
5316 | auto e = s->iam_policy->eval(s->env, *s->auth.identity, | |
5317 | rgw::IAM::s3PutObject, | |
5318 | rgw_obj(s->bucket, s->object)); | |
5319 | if (e == Effect::Allow) { | |
5320 | return 0; | |
5321 | } else if (e == Effect::Deny) { | |
5322 | return -EACCES; | |
5323 | } | |
5324 | } | |
5325 | ||
5326 | if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) { | |
5327 | return -EACCES; | |
5328 | } | |
5329 | ||
5330 | return 0; | |
5331 | } | |
5332 | ||
void RGWCompleteMultipart::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5337 | ||
5338 | void RGWCompleteMultipart::execute() | |
5339 | { | |
5340 | RGWMultiCompleteUpload *parts; | |
5341 | map<int, string>::iterator iter; | |
5342 | RGWMultiXMLParser parser; | |
5343 | string meta_oid; | |
5344 | map<uint32_t, RGWUploadPartInfo> obj_parts; | |
5345 | map<uint32_t, RGWUploadPartInfo>::iterator obj_iter; | |
5346 | map<string, bufferlist> attrs; | |
5347 | off_t ofs = 0; | |
5348 | MD5 hash; | |
5349 | char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE]; | |
5350 | char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16]; | |
5351 | bufferlist etag_bl; | |
5352 | rgw_obj meta_obj; | |
5353 | rgw_obj target_obj; | |
5354 | RGWMPObj mp; | |
5355 | RGWObjManifest manifest; | |
5356 | uint64_t olh_epoch = 0; | |
5357 | string version_id; | |
5358 | ||
5359 | op_ret = get_params(); | |
5360 | if (op_ret < 0) | |
5361 | return; | |
5362 | op_ret = get_system_versioning_params(s, &olh_epoch, &version_id); | |
5363 | if (op_ret < 0) { | |
5364 | return; | |
5365 | } | |
5366 | ||
5367 | if (!data || !len) { | |
5368 | op_ret = -ERR_MALFORMED_XML; | |
5369 | return; | |
5370 | } | |
5371 | ||
5372 | if (!parser.init()) { | |
5373 | op_ret = -EIO; | |
5374 | return; | |
5375 | } | |
5376 | ||
5377 | if (!parser.parse(data, len, 1)) { | |
5378 | op_ret = -ERR_MALFORMED_XML; | |
5379 | return; | |
5380 | } | |
5381 | ||
5382 | parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload")); | |
5383 | if (!parts || parts->parts.empty()) { | |
5384 | op_ret = -ERR_MALFORMED_XML; | |
5385 | return; | |
5386 | } | |
5387 | ||
5388 | if ((int)parts->parts.size() > | |
5389 | s->cct->_conf->rgw_multipart_part_upload_limit) { | |
5390 | op_ret = -ERANGE; | |
5391 | return; | |
5392 | } | |
5393 | ||
5394 | mp.init(s->object.name, upload_id); | |
5395 | meta_oid = mp.get_meta(); | |
5396 | ||
5397 | int total_parts = 0; | |
5398 | int handled_parts = 0; | |
5399 | int max_parts = 1000; | |
5400 | int marker = 0; | |
5401 | bool truncated; | |
5402 | RGWCompressionInfo cs_info; | |
5403 | bool compressed = false; | |
5404 | uint64_t accounted_size = 0; | |
5405 | ||
5406 | uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size; | |
5407 | ||
5408 | list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */ | |
5409 | ||
5410 | bool versioned_object = s->bucket_info.versioning_enabled(); | |
5411 | ||
5412 | iter = parts->parts.begin(); | |
5413 | ||
5414 | meta_obj.init_ns(s->bucket, meta_oid, mp_ns); | |
5415 | meta_obj.set_in_extra_data(true); | |
5416 | meta_obj.index_hash_source = s->object.name; | |
5417 | ||
5418 | /*take a cls lock on meta_obj to prevent racing completions (or retries) | |
5419 | from deleting the parts*/ | |
5420 | rgw_pool meta_pool; | |
5421 | rgw_raw_obj raw_obj; | |
5422 | int max_lock_secs_mp = | |
5423 | s->cct->_conf->get_val<int64_t>("rgw_mp_lock_max_time"); | |
5424 | utime_t dur(max_lock_secs_mp, 0); | |
5425 | ||
5426 | store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj); | |
5427 | store->get_obj_data_pool((s->bucket_info).placement_rule, | |
5428 | meta_obj,&meta_pool); | |
5429 | store->open_pool_ctx(meta_pool, serializer.ioctx); | |
5430 | ||
5431 | op_ret = serializer.try_lock(raw_obj.oid, dur); | |
5432 | if (op_ret < 0) { | |
5433 | dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl; | |
5434 | op_ret = -ERR_INTERNAL_ERROR; | |
5435 | s->err.message = "This multipart completion is already in progress"; | |
5436 | return; | |
5437 | } | |
5438 | ||
5439 | op_ret = get_obj_attrs(store, s, meta_obj, attrs); | |
5440 | ||
5441 | if (op_ret < 0) { | |
5442 | ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj | |
5443 | << " ret=" << op_ret << dendl; | |
5444 | return; | |
5445 | } | |
5446 | ||
5447 | do { | |
5448 | op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts, | |
5449 | marker, obj_parts, &marker, &truncated); | |
5450 | if (op_ret == -ENOENT) { | |
5451 | op_ret = -ERR_NO_SUCH_UPLOAD; | |
5452 | } | |
5453 | if (op_ret < 0) | |
5454 | return; | |
5455 | ||
5456 | total_parts += obj_parts.size(); | |
5457 | if (!truncated && total_parts != (int)parts->parts.size()) { | |
5458 | ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts | |
5459 | << " expected: " << parts->parts.size() << dendl; | |
5460 | op_ret = -ERR_INVALID_PART; | |
5461 | return; | |
5462 | } | |
5463 | ||
5464 | for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) { | |
5465 | uint64_t part_size = obj_iter->second.accounted_size; | |
5466 | if (handled_parts < (int)parts->parts.size() - 1 && | |
5467 | part_size < min_part_size) { | |
5468 | op_ret = -ERR_TOO_SMALL; | |
5469 | return; | |
5470 | } | |
5471 | ||
5472 | char petag[CEPH_CRYPTO_MD5_DIGESTSIZE]; | |
5473 | if (iter->first != (int)obj_iter->first) { | |
5474 | ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: " | |
5475 | << iter->first << " next uploaded: " | |
5476 | << obj_iter->first << dendl; | |
5477 | op_ret = -ERR_INVALID_PART; | |
5478 | return; | |
5479 | } | |
5480 | string part_etag = rgw_string_unquote(iter->second); | |
5481 | if (part_etag.compare(obj_iter->second.etag) != 0) { | |
5482 | ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first | |
5483 | << " etag: " << iter->second << dendl; | |
5484 | op_ret = -ERR_INVALID_PART; | |
5485 | return; | |
5486 | } | |
5487 | ||
5488 | hex_to_buf(obj_iter->second.etag.c_str(), petag, | |
5489 | CEPH_CRYPTO_MD5_DIGESTSIZE); | |
5490 | hash.Update((const byte *)petag, sizeof(petag)); | |
5491 | ||
5492 | RGWUploadPartInfo& obj_part = obj_iter->second; | |
5493 | ||
5494 | /* update manifest for part */ | |
5495 | string oid = mp.get_part(obj_iter->second.num); | |
5496 | rgw_obj src_obj; | |
5497 | src_obj.init_ns(s->bucket, oid, mp_ns); | |
5498 | ||
5499 | if (obj_part.manifest.empty()) { | |
5500 | ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj=" | |
5501 | << src_obj << dendl; | |
5502 | op_ret = -ERR_INVALID_PART; | |
5503 | return; | |
5504 | } else { | |
5505 | manifest.append(obj_part.manifest, store); | |
5506 | } | |
5507 | ||
5508 | if (obj_part.cs_info.compression_type != "none") { | |
5509 | if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) { | |
5510 | ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload (" | |
5511 | << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl; | |
5512 | op_ret = -ERR_INVALID_PART; | |
5513 | return; | |
5514 | } | |
5515 | int64_t new_ofs; // offset in compression data for new part | |
5516 | if (cs_info.blocks.size() > 0) | |
5517 | new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len; | |
5518 | else | |
5519 | new_ofs = 0; | |
5520 | for (const auto& block : obj_part.cs_info.blocks) { | |
5521 | compression_block cb; | |
5522 | cb.old_ofs = block.old_ofs + cs_info.orig_size; | |
5523 | cb.new_ofs = new_ofs; | |
5524 | cb.len = block.len; | |
5525 | cs_info.blocks.push_back(cb); | |
5526 | new_ofs = cb.new_ofs + cb.len; | |
5527 | } | |
5528 | if (!compressed) | |
5529 | cs_info.compression_type = obj_part.cs_info.compression_type; | |
5530 | cs_info.orig_size += obj_part.cs_info.orig_size; | |
5531 | compressed = true; | |
5532 | } | |
5533 | ||
5534 | rgw_obj_index_key remove_key; | |
5535 | src_obj.key.get_index_key(&remove_key); | |
5536 | ||
5537 | remove_objs.push_back(remove_key); | |
5538 | ||
5539 | ofs += obj_part.size; | |
5540 | accounted_size += obj_part.accounted_size; | |
5541 | } | |
5542 | } while (truncated); | |
5543 | hash.Final((byte *)final_etag); | |
5544 | ||
5545 | buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str); | |
5546 | snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, | |
5547 | "-%lld", (long long)parts->parts.size()); | |
5548 | etag = final_etag_str; | |
5549 | ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl; | |
5550 | ||
5551 | etag_bl.append(final_etag_str, strlen(final_etag_str) + 1); | |
5552 | ||
5553 | attrs[RGW_ATTR_ETAG] = etag_bl; | |
5554 | ||
5555 | if (compressed) { | |
5556 | // write compression attribute to full object | |
5557 | bufferlist tmp; | |
5558 | ::encode(cs_info, tmp); | |
5559 | attrs[RGW_ATTR_COMPRESSION] = tmp; | |
5560 | } | |
5561 | ||
5562 | target_obj.init(s->bucket, s->object.name); | |
5563 | if (versioned_object) { | |
5564 | store->gen_rand_obj_instance_name(&target_obj); | |
5565 | } | |
5566 | ||
5567 | RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx); | |
5568 | ||
5569 | obj_ctx.obj.set_atomic(target_obj); | |
5570 | ||
5571 | RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj); | |
5572 | RGWRados::Object::Write obj_op(&op_target); | |
5573 | ||
5574 | obj_op.meta.manifest = &manifest; | |
5575 | obj_op.meta.remove_objs = &remove_objs; | |
5576 | ||
5577 | obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */ | |
5578 | obj_op.meta.owner = s->owner.get_id(); | |
5579 | obj_op.meta.flags = PUT_OBJ_CREATE; | |
5580 | obj_op.meta.modify_tail = true; | |
5581 | obj_op.meta.completeMultipart = true; | |
5582 | op_ret = obj_op.write_meta(ofs, accounted_size, attrs); | |
5583 | if (op_ret < 0) | |
5584 | return; | |
5585 | ||
5586 | // remove the upload obj | |
5587 | int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx), | |
5588 | s->bucket_info, meta_obj, 0); | |
5589 | if (r >= 0) { | |
5590 | /* serializer's exclusive lock is released */ | |
5591 | serializer.clear_locked(); | |
5592 | } else { | |
5593 | ldout(store->ctx(), 0) << "WARNING: failed to remove object " | |
5594 | << meta_obj << dendl; | |
5595 | } | |
5596 | } | |
5597 | ||
5598 | int RGWCompleteMultipart::MPSerializer::try_lock( | |
5599 | const std::string& _oid, | |
5600 | utime_t dur) | |
5601 | { | |
5602 | oid = _oid; | |
5603 | op.assert_exists(); | |
5604 | lock.set_duration(dur); | |
5605 | lock.lock_exclusive(&op); | |
5606 | int ret = ioctx.operate(oid, &op); | |
5607 | if (! ret) { | |
5608 | locked = true; | |
5609 | } | |
5610 | return ret; | |
5611 | } | |
5612 | ||
void RGWCompleteMultipart::complete()
{
  /* Release the serializer's exclusive cls lock if execute() acquired it but
   * did not already release it via clear_locked() (i.e. the meta object was
   * not removed, or execute() bailed out after locking).  The common path is
   * that the lock was already released, hence the unlikely() hint. */
  if (unlikely(serializer.locked)) {
    int r = serializer.unlock();
    if (r < 0) {
      /* Best effort: the lock has a duration and will expire on its own. */
      ldout(store->ctx(), 0) << "WARNING: failed to unlock "
			     << serializer.oid << dendl;
    }
  }
  send_response();
}
5625 | ||
5626 | int RGWAbortMultipart::verify_permission() | |
5627 | { | |
5628 | if (s->iam_policy) { | |
5629 | auto e = s->iam_policy->eval(s->env, *s->auth.identity, | |
5630 | rgw::IAM::s3AbortMultipartUpload, | |
5631 | rgw_obj(s->bucket, s->object)); | |
5632 | if (e == Effect::Allow) { | |
5633 | return 0; | |
5634 | } else if (e == Effect::Deny) { | |
5635 | return -EACCES; | |
5636 | } | |
5637 | } | |
5638 | ||
5639 | if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) { | |
5640 | return -EACCES; | |
5641 | } | |
5642 | ||
5643 | return 0; | |
5644 | } | |
5645 | ||
void RGWAbortMultipart::pre_exec()
{
  /* Shared pre-exec hook for bucket/object-scoped operations. */
  rgw_bucket_object_pre_exec(s);
}
5650 | ||
5651 | void RGWAbortMultipart::execute() | |
5652 | { | |
5653 | op_ret = -EINVAL; | |
5654 | string upload_id; | |
5655 | string meta_oid; | |
5656 | upload_id = s->info.args.get("uploadId"); | |
5657 | map<string, bufferlist> attrs; | |
5658 | rgw_obj meta_obj; | |
5659 | RGWMPObj mp; | |
5660 | ||
5661 | if (upload_id.empty() || s->object.empty()) | |
5662 | return; | |
5663 | ||
5664 | mp.init(s->object.name, upload_id); | |
5665 | meta_oid = mp.get_meta(); | |
5666 | ||
5667 | op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs); | |
5668 | if (op_ret < 0) | |
5669 | return; | |
5670 | ||
5671 | RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx); | |
5672 | op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp); | |
5673 | } | |
5674 | ||
5675 | int RGWListMultipart::verify_permission() | |
5676 | { | |
5677 | if (!verify_object_permission(s, rgw::IAM::s3ListMultipartUploadParts)) | |
5678 | return -EACCES; | |
5679 | ||
5680 | return 0; | |
5681 | } | |
5682 | ||
void RGWListMultipart::pre_exec()
{
  /* Shared pre-exec hook for bucket/object-scoped operations. */
  rgw_bucket_object_pre_exec(s);
}
5687 | ||
5688 | void RGWListMultipart::execute() | |
5689 | { | |
5690 | map<string, bufferlist> xattrs; | |
5691 | string meta_oid; | |
5692 | RGWMPObj mp; | |
5693 | ||
5694 | op_ret = get_params(); | |
5695 | if (op_ret < 0) | |
5696 | return; | |
5697 | ||
5698 | mp.init(s->object.name, upload_id); | |
5699 | meta_oid = mp.get_meta(); | |
5700 | ||
5701 | op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs); | |
5702 | if (op_ret < 0) | |
5703 | return; | |
5704 | ||
5705 | op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts, | |
5706 | marker, parts, NULL, &truncated); | |
5707 | } | |
5708 | ||
5709 | int RGWListBucketMultiparts::verify_permission() | |
5710 | { | |
5711 | if (!verify_bucket_permission(s, | |
5712 | rgw::IAM::s3ListBucketMultiPartUploads)) | |
5713 | return -EACCES; | |
5714 | ||
5715 | return 0; | |
5716 | } | |
5717 | ||
void RGWListBucketMultiparts::pre_exec()
{
  /* Shared pre-exec hook for bucket/object-scoped operations. */
  rgw_bucket_object_pre_exec(s);
}
5722 | ||
5723 | void RGWListBucketMultiparts::execute() | |
5724 | { | |
5725 | vector<rgw_bucket_dir_entry> objs; | |
5726 | string marker_meta; | |
5727 | ||
5728 | op_ret = get_params(); | |
5729 | if (op_ret < 0) | |
5730 | return; | |
5731 | ||
5732 | if (s->prot_flags & RGW_REST_SWIFT) { | |
5733 | string path_args; | |
5734 | path_args = s->info.args.get("path"); | |
5735 | if (!path_args.empty()) { | |
5736 | if (!delimiter.empty() || !prefix.empty()) { | |
5737 | op_ret = -EINVAL; | |
5738 | return; | |
5739 | } | |
5740 | prefix = path_args; | |
5741 | delimiter="/"; | |
5742 | } | |
5743 | } | |
5744 | marker_meta = marker.get_meta(); | |
5745 | ||
5746 | op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter, | |
5747 | max_uploads, &objs, &common_prefixes, &is_truncated); | |
5748 | if (op_ret < 0) { | |
5749 | return; | |
5750 | } | |
5751 | ||
5752 | if (!objs.empty()) { | |
5753 | vector<rgw_bucket_dir_entry>::iterator iter; | |
5754 | RGWMultipartUploadEntry entry; | |
5755 | for (iter = objs.begin(); iter != objs.end(); ++iter) { | |
5756 | rgw_obj_key key(iter->key); | |
5757 | if (!entry.mp.from_meta(key.name)) | |
5758 | continue; | |
5759 | entry.obj = *iter; | |
5760 | uploads.push_back(entry); | |
5761 | } | |
5762 | next_marker = entry; | |
5763 | } | |
5764 | } | |
5765 | ||
5766 | void RGWGetHealthCheck::execute() | |
5767 | { | |
5768 | if (!g_conf->rgw_healthcheck_disabling_path.empty() && | |
5769 | (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) { | |
5770 | /* Disabling path specified & existent in the filesystem. */ | |
5771 | op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */ | |
5772 | } else { | |
5773 | op_ret = 0; /* 200 OK */ | |
5774 | } | |
5775 | } | |
5776 | ||
5777 | int RGWDeleteMultiObj::verify_permission() | |
5778 | { | |
5779 | acl_allowed = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE); | |
5780 | if (!acl_allowed && !s->iam_policy) | |
5781 | return -EACCES; | |
5782 | ||
5783 | return 0; | |
5784 | } | |
5785 | ||
void RGWDeleteMultiObj::pre_exec()
{
  /* Shared pre-exec hook for bucket/object-scoped operations. */
  rgw_bucket_object_pre_exec(s);
}
5790 | ||
void RGWDeleteMultiObj::execute()
{
  /* S3 multi-object delete: parse the XML request body, evaluate per-object
   * IAM policy, delete each key, and stream a per-object result back.
   * Uses goto for the shared cleanup paths (free(data) + response). */
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  int num_processed = 0;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  /* The request body is mandatory for multi-delete. */
  if (!data) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  }

  /* In quiet mode only failures are reported in the response. */
  if (multi_delete->is_quiet())
    quiet = true;

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  /* Process at most max_to_delete keys; any excess keys are not handled. */
  for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end() && num_processed < max_to_delete;
        ++iter, num_processed++) {
    rgw_obj obj(bucket, *iter);
    if (s->iam_policy) {
      /* Versioned deletes are governed by s3DeleteObjectVersion.  A Deny,
       * or a Pass combined with a negative ACL verdict from
       * verify_permission(), rejects only this key (continue). */
      auto e = s->iam_policy->eval(s->env,
				   *s->auth.identity,
				   iter->instance.empty() ?
				   rgw::IAM::s3DeleteObject :
				   rgw::IAM::s3DeleteObjectVersion,
				   obj);
      if ((e == Effect::Deny) ||
	  (e == Effect::Pass && !acl_allowed)) {
	send_partial_response(*iter, false, "", -EACCES);
	continue;
      }
    }

    obj_ctx->obj.set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    /* Deleting an absent key counts as success, matching S3 semantics. */
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
			  del_op.result.version_id, op_ret);
  }

  /* set the return code to zero, errors at this point will be
     dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  free(data);
  return;

error:
  send_status();
  free(data);
  return;

}
5885 | ||
5886 | bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo, | |
5887 | map<string, bufferlist>& battrs, | |
5888 | ACLOwner& bucket_owner /* out */) | |
5889 | { | |
5890 | RGWAccessControlPolicy bacl(store->ctx()); | |
5891 | int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket); | |
5892 | if (ret < 0) { | |
5893 | return false; | |
5894 | } | |
5895 | ||
5896 | auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant); | |
5897 | ||
5898 | bucket_owner = bacl.get_owner(); | |
5899 | ||
5900 | /* We can use global user_acl because each BulkDelete request is allowed | |
5901 | * to work on entities from a single account only. */ | |
5902 | return verify_bucket_permission(s, binfo.bucket, s->user_acl.get(), | |
5903 | &bacl, policy, rgw::IAM::s3DeleteBucket); | |
5904 | } | |
5905 | ||
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  /* Delete one entry of a bulk-delete request: an object when path.obj_key
   * is set, otherwise the bucket itself.  Failures are tallied into
   * num_unfound/failures and reported through the false return value; the
   * goto labels implement the per-failure bookkeeping. */
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   path.bucket_name, binfo, nullptr,
                                   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    /* Object deletion path. */
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.obj.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    /* Bucket deletion path: delete the bucket, unlink it from its owner,
     * then forward to the metadata master if this zone is not it. */
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
                              binfo.bucket.name, false);
      if (ret < 0) {
        /* Unlink failure is logged but does not undo the bucket delete. */
        ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
                         << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    if (!store->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                      nullptr);
      if (ret < 0) {
        if (ret == -ENOENT) {
          /* adjust error, we want to return with NoSuchBucket and not
           * NoSuchKey */
          ret = -ERR_NO_SUCH_BUCKET;
        }
        goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
  /* Could not read bucket info: ENOENT is counted, others become failures. */
  if (-ENOENT == ret) {
      ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
      num_unfound++;
  } else {
    ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
                            << dendl;

    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

auth_fail:
  /* Permission denied for this path; recorded, not fatal to the batch. */
  ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
  {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

delop_fail:
  /* The delete itself failed; ENOENT is counted, others become failures. */
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
    num_unfound++;
  } else {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;
}
6017 | ||
6018 | bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths) | |
6019 | { | |
6020 | ldout(store->ctx(), 20) << "in delete_chunk" << dendl; | |
6021 | for (auto path : paths) { | |
6022 | ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl; | |
6023 | delete_single(path); | |
6024 | } | |
6025 | ||
6026 | return true; | |
6027 | } | |
6028 | ||
int RGWBulkDelete::verify_permission()
{
  /* Authorization is deferred to Deleter::verify_permission(), evaluated
   * per path during execute(), since one request may span many buckets. */
  return 0;
}
6033 | ||
void RGWBulkDelete::pre_exec()
{
  /* Shared pre-exec hook for bucket/object-scoped operations. */
  rgw_bucket_object_pre_exec(s);
}
6038 | ||
6039 | void RGWBulkDelete::execute() | |
6040 | { | |
6041 | deleter = std::unique_ptr<Deleter>(new Deleter(store, s)); | |
6042 | ||
6043 | bool is_truncated = false; | |
6044 | do { | |
6045 | list<RGWBulkDelete::acct_path_t> items; | |
6046 | ||
6047 | int ret = get_data(items, &is_truncated); | |
6048 | if (ret < 0) { | |
6049 | return; | |
6050 | } | |
6051 | ||
6052 | ret = deleter->delete_chunk(items); | |
6053 | } while (!op_ret && is_truncated); | |
6054 | ||
6055 | return; | |
6056 | } | |
6057 | ||
6058 | ||
/* Out-of-line definition for the class' static constexpr member (required
 * when it is ODR-used, prior to C++17 inline variables). */
constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6060 | ||
int RGWBulkUploadOp::verify_permission()
{
  /* Bulk upload requires an authenticated user with account-level write
   * permission; it may create buckets, so cross-tenant requests and users
   * with bucket creation disabled are rejected up front. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (! verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Buckets may only be created inside the requester's own tenant. */
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }

  /* A negative max_buckets disables bucket creation for this user. */
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  return 0;
}
6085 | ||
void RGWBulkUploadOp::pre_exec()
{
  /* Shared pre-exec hook for bucket/object-scoped operations. */
  rgw_bucket_object_pre_exec(s);
}
6090 | ||
6091 | boost::optional<std::pair<std::string, rgw_obj_key>> | |
6092 | RGWBulkUploadOp::parse_path(const boost::string_ref& path) | |
6093 | { | |
6094 | /* We need to skip all slashes at the beginning in order to preserve | |
6095 | * compliance with Swift. */ | |
6096 | const size_t start_pos = path.find_first_not_of('/'); | |
6097 | ||
6098 | if (boost::string_ref::npos != start_pos) { | |
6099 | /* Seperator is the first slash after the leading ones. */ | |
6100 | const size_t sep_pos = path.substr(start_pos).find('/'); | |
6101 | ||
6102 | if (boost::string_ref::npos != sep_pos) { | |
6103 | const auto bucket_name = path.substr(start_pos, sep_pos - start_pos); | |
6104 | const auto obj_name = path.substr(sep_pos + 1); | |
6105 | ||
6106 | return std::make_pair(bucket_name.to_string(), | |
6107 | rgw_obj_key(obj_name.to_string())); | |
6108 | } else { | |
6109 | /* It's guaranteed here that bucket name is at least one character | |
6110 | * long and is different than slash. */ | |
6111 | return std::make_pair(path.substr(start_pos).to_string(), | |
6112 | rgw_obj_key()); | |
6113 | } | |
6114 | } | |
6115 | ||
6116 | return none; | |
6117 | } | |
6118 | ||
6119 | std::pair<std::string, std::string> | |
6120 | RGWBulkUploadOp::handle_upload_path(struct req_state *s) | |
6121 | { | |
6122 | std::string bucket_path, file_prefix; | |
6123 | if (! s->init_state.url_bucket.empty()) { | |
6124 | file_prefix = bucket_path = s->init_state.url_bucket + "/"; | |
6125 | if (! s->object.empty()) { | |
6126 | std::string& object_name = s->object.name; | |
6127 | ||
6128 | /* As rgw_obj_key::empty() already verified emptiness of s->object.name, | |
6129 | * we can safely examine its last element. */ | |
6130 | if (object_name.back() == '/') { | |
6131 | file_prefix.append(object_name); | |
6132 | } else { | |
6133 | file_prefix.append(object_name).append("/"); | |
6134 | } | |
6135 | } | |
6136 | } | |
6137 | return std::make_pair(bucket_path, file_prefix); | |
6138 | } | |
6139 | ||
6140 | int RGWBulkUploadOp::handle_dir_verify_permission() | |
6141 | { | |
6142 | if (s->user->max_buckets > 0) { | |
6143 | RGWUserBuckets buckets; | |
6144 | std::string marker; | |
6145 | bool is_truncated = false; | |
6146 | op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, | |
6147 | marker, std::string(), s->user->max_buckets, | |
6148 | false, &is_truncated); | |
6149 | if (op_ret < 0) { | |
6150 | return op_ret; | |
6151 | } | |
6152 | ||
6153 | if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) { | |
6154 | return -ERR_TOO_MANY_BUCKETS; | |
6155 | } | |
6156 | } | |
6157 | ||
6158 | return 0; | |
6159 | } | |
6160 | ||
/* Prepare req_info for forwarding to the metadata master: ensure the URI
 * carries the bucket name so the master resolves the same container. */
static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
{
  /* the request of container or object level will contain bucket name.
   * only at account level need to append the bucket name */
  if (info.script_uri.find(bucket_name) != std::string::npos) {
    return;
  }

  ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
  info.script_uri.append("/").append(bucket_name);
  info.request_uri_aws4 = info.request_uri = info.script_uri;
  info.effective_uri = "/" + bucket_name;
}
6174 | ||
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  /* Create (or validate) the container named by a directory entry of the
   * bulk-upload archive.  Mirrors ordinary bucket creation: quota check,
   * ownership check when the bucket exists, forwarding to the metadata
   * master in multisite setups, then create + link under the user. */
  ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) = *parse_path(path);

  /* NOTE(review): `obj` is constructed but never used below — looks like
   * leftover code; confirm before removing. */
  rgw_raw_obj obj(store->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, NULL, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    /* An existing bucket owned by someone else is a name conflict. */
    RGWAccessControlPolicy old_policy(s->cct);
    int r = get_bucket_policy_from_attr(s->cct, store, binfo,
                                        battrs, &old_policy);
    if (r >= 0) {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  if (! store->is_meta_master()) {
    /* Multisite: the metadata master performs the authoritative creation;
     * adopt the bucket/versions/shard count it reports back. */
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
                      << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
                      << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }


  std::string placement_rule;
  if (bucket_exists) {
    /* Recreating an existing bucket must not silently change its placement.
     * NOTE(review): the return value of select_bucket_placement() is stored
     * in op_ret but not checked before comparing rules — confirm intended. */
    std::string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user),
                                            store->get_zonegroup().get_id(),
                                            placement_rule,
                                            &selected_placement_rule,
                                            nullptr);
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
                    << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
                       << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldout(s->cct, 20) << "bulk upload: containers already exists"
                      << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6334 | ||
6335 | ||
6336 | bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo, | |
6337 | const rgw_obj& obj, | |
6338 | std::map<std::string, ceph::bufferlist>& battrs, | |
6339 | ACLOwner& bucket_owner /* out */) | |
6340 | { | |
6341 | RGWAccessControlPolicy bacl(store->ctx()); | |
6342 | op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket); | |
6343 | if (op_ret < 0) { | |
6344 | ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket" | |
6345 | << dendl; | |
6346 | return false; | |
6347 | } | |
6348 | ||
6349 | auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant); | |
6350 | ||
6351 | bucket_owner = bacl.get_owner(); | |
6352 | if (policy) { | |
6353 | auto e = policy->eval(s->env, *s->auth.identity, | |
6354 | rgw::IAM::s3PutObject, obj); | |
6355 | if (e == Effect::Allow) { | |
6356 | return true; | |
6357 | } else if (e == Effect::Deny) { | |
6358 | return false; | |
6359 | } | |
6360 | } | |
6361 | ||
6362 | return verify_bucket_permission_no_policy(s, s->user_acl.get(), | |
6363 | &bacl, RGW_PERM_WRITE); | |
6364 | } | |
6365 | ||
6366 | int RGWBulkUploadOp::handle_file(const boost::string_ref path, | |
6367 | const size_t size, | |
6368 | AlignedStreamGetter& body) | |
6369 | { | |
6370 | ||
6371 | ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size | |
6372 | << dendl; | |
6373 | ||
6374 | RGWPutObjDataProcessor *filter = nullptr; | |
6375 | boost::optional<RGWPutObj_Compress> compressor; | |
6376 | ||
6377 | if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) { | |
6378 | op_ret = -ERR_TOO_LARGE; | |
6379 | return op_ret; | |
6380 | } | |
6381 | ||
6382 | std::string bucket_name; | |
6383 | rgw_obj_key object; | |
6384 | std::tie(bucket_name, object) = *parse_path(path); | |
6385 | ||
6386 | auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx); | |
6387 | RGWBucketInfo binfo; | |
6388 | std::map<std::string, ceph::bufferlist> battrs; | |
6389 | ACLOwner bowner; | |
6390 | op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant, | |
6391 | bucket_name, binfo, nullptr, &battrs); | |
6392 | if (op_ret == -ENOENT) { | |
6393 | ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name | |
6394 | << dendl; | |
6395 | } else if (op_ret < 0) { | |
6396 | return op_ret; | |
6397 | } | |
6398 | ||
6399 | if (! handle_file_verify_permission(binfo, | |
6400 | rgw_obj(binfo.bucket, object), | |
6401 | battrs, bowner)) { | |
6402 | ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl; | |
6403 | op_ret = -EACCES; | |
6404 | return op_ret; | |
6405 | } | |
6406 | ||
6407 | op_ret = store->check_quota(bowner.get_id(), binfo.bucket, | |
6408 | user_quota, bucket_quota, size); | |
6409 | if (op_ret < 0) { | |
6410 | return op_ret; | |
6411 | } | |
6412 | ||
6413 | op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); | |
6414 | if (op_ret < 0) { | |
6415 | return op_ret; | |
6416 | } | |
6417 | ||
6418 | RGWPutObjProcessor_Atomic processor(obj_ctx, | |
6419 | binfo, | |
6420 | binfo.bucket, | |
6421 | object.name, | |
6422 | /* part size */ | |
6423 | s->cct->_conf->rgw_obj_stripe_size, | |
6424 | s->req_id, | |
6425 | binfo.versioning_enabled()); | |
6426 | ||
6427 | /* No filters by default. */ | |
6428 | filter = &processor; | |
6429 | ||
6430 | op_ret = processor.prepare(store, nullptr); | |
6431 | if (op_ret < 0) { | |
6432 | ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret=" | |
6433 | << op_ret << dendl; | |
6434 | return op_ret; | |
6435 | } | |
6436 | ||
6437 | const auto& compression_type = store->get_zone_params().get_compression_type( | |
6438 | binfo.placement_rule); | |
6439 | CompressorRef plugin; | |
6440 | if (compression_type != "none") { | |
6441 | plugin = Compressor::create(s->cct, compression_type); | |
6442 | if (! plugin) { | |
6443 | ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type " | |
6444 | << compression_type << dendl; | |
6445 | } else { | |
6446 | compressor.emplace(s->cct, plugin, filter); | |
6447 | filter = &*compressor; | |
6448 | } | |
6449 | } | |
6450 | ||
6451 | /* Upload file content. */ | |
6452 | ssize_t len = 0; | |
6453 | size_t ofs = 0; | |
6454 | MD5 hash; | |
6455 | do { | |
6456 | ceph::bufferlist data; | |
6457 | len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data); | |
6458 | ||
6459 | ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl; | |
6460 | if (len < 0) { | |
6461 | op_ret = len; | |
6462 | return op_ret; | |
6463 | } else if (len > 0) { | |
6464 | hash.Update((const byte *)data.c_str(), data.length()); | |
6465 | op_ret = put_data_and_throttle(filter, data, ofs, false); | |
6466 | if (op_ret < 0) { | |
6467 | ldout(s->cct, 20) << "processor->thottle_data() returned ret=" | |
6468 | << op_ret << dendl; | |
6469 | return op_ret; | |
6470 | } | |
6471 | ||
6472 | ofs += len; | |
6473 | } | |
6474 | ||
6475 | } while (len > 0); | |
6476 | ||
6477 | if (ofs != size) { | |
6478 | ldout(s->cct, 10) << "bulk upload: real file size different from declared" | |
6479 | << dendl; | |
6480 | op_ret = -EINVAL; | |
6481 | } | |
6482 | ||
6483 | op_ret = store->check_quota(bowner.get_id(), binfo.bucket, | |
6484 | user_quota, bucket_quota, size); | |
6485 | if (op_ret < 0) { | |
6486 | ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path | |
6487 | << dendl; | |
6488 | return op_ret; | |
6489 | } | |
6490 | ||
6491 | op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); | |
6492 | if (op_ret < 0) { | |
6493 | return op_ret; | |
6494 | } | |
6495 | ||
6496 | char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; | |
6497 | unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE]; | |
6498 | hash.Final(m); | |
6499 | buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5); | |
6500 | ||
6501 | /* Create metadata: ETAG. */ | |
6502 | std::map<std::string, ceph::bufferlist> attrs; | |
6503 | std::string etag = calc_md5; | |
6504 | ceph::bufferlist etag_bl; | |
6505 | etag_bl.append(etag.c_str(), etag.size() + 1); | |
6506 | attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl)); | |
6507 | ||
6508 | /* Create metadata: ACLs. */ | |
6509 | RGWAccessControlPolicy policy; | |
6510 | policy.create_default(s->user->user_id, s->user->display_name); | |
6511 | ceph::bufferlist aclbl; | |
6512 | policy.encode(aclbl); | |
6513 | attrs.emplace(RGW_ATTR_ACL, std::move(aclbl)); | |
6514 | ||
6515 | /* Create metadata: compression info. */ | |
6516 | if (compressor && compressor->is_compressed()) { | |
6517 | ceph::bufferlist tmp; | |
6518 | RGWCompressionInfo cs_info; | |
6519 | cs_info.compression_type = plugin->get_type_name(); | |
6520 | cs_info.orig_size = s->obj_size; | |
6521 | cs_info.blocks = std::move(compressor->get_compression_blocks()); | |
6522 | ::encode(cs_info, tmp); | |
6523 | attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp)); | |
6524 | } | |
6525 | ||
6526 | /* Complete the transaction. */ | |
6527 | op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs, | |
6528 | ceph::real_time() /* delete_at */); | |
6529 | if (op_ret < 0) { | |
6530 | ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret=" | |
6531 | << op_ret << dendl; | |
6532 | } | |
6533 | ||
6534 | return op_ret; | |
6535 | } | |
6536 | ||
6537 | void RGWBulkUploadOp::execute() | |
6538 | { | |
6539 | ceph::bufferlist buffer(64 * 1024); | |
6540 | ||
6541 | ldout(s->cct, 20) << "bulk upload: start" << dendl; | |
6542 | ||
6543 | /* Create an instance of stream-abstracting class. Having this indirection | |
6544 | * allows for easy introduction of decompressors like gzip and bzip2. */ | |
6545 | auto stream = create_stream(); | |
6546 | if (! stream) { | |
6547 | return; | |
6548 | } | |
6549 | ||
6550 | /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See: | |
6551 | * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */ | |
6552 | std::string bucket_path, file_prefix; | |
6553 | std::tie(bucket_path, file_prefix) = handle_upload_path(s); | |
6554 | ||
6555 | auto status = rgw::tar::StatusIndicator::create(); | |
6556 | do { | |
6557 | op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer); | |
6558 | if (op_ret < 0) { | |
6559 | ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl; | |
6560 | return; | |
6561 | } | |
6562 | ||
6563 | /* We need to re-interpret the buffer as a TAR block. Exactly two blocks | |
6564 | * must be tracked to detect out end-of-archive. It occurs when both of | |
6565 | * them are empty (zeroed). Tracing this particular inter-block dependency | |
6566 | * is responsibility of the rgw::tar::StatusIndicator class. */ | |
6567 | boost::optional<rgw::tar::HeaderView> header; | |
6568 | std::tie(status, header) = rgw::tar::interpret_block(status, buffer); | |
6569 | ||
6570 | if (! status.empty() && header) { | |
6571 | /* This specific block isn't empty (entirely zeroed), so we can parse | |
6572 | * it as a TAR header and dispatch. At the moment we do support only | |
6573 | * regular files and directories. Everything else (symlinks, devices) | |
6574 | * will be ignored but won't cease the whole upload. */ | |
6575 | switch (header->get_filetype()) { | |
6576 | case rgw::tar::FileType::NORMAL_FILE: { | |
6577 | ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl; | |
6578 | ||
6579 | boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \ | |
6580 | file_prefix + header->get_filename().to_string(); | |
6581 | auto body = AlignedStreamGetter(0, header->get_filesize(), | |
6582 | rgw::tar::BLOCK_SIZE, *stream); | |
6583 | op_ret = handle_file(filename, | |
6584 | header->get_filesize(), | |
6585 | body); | |
6586 | if (! op_ret) { | |
6587 | /* Only regular files counts. */ | |
6588 | num_created++; | |
6589 | } else { | |
6590 | failures.emplace_back(op_ret, filename.to_string()); | |
6591 | } | |
6592 | break; | |
6593 | } | |
6594 | case rgw::tar::FileType::DIRECTORY: { | |
6595 | ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl; | |
6596 | ||
6597 | boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path; | |
6598 | op_ret = handle_dir(dirname); | |
6599 | if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) { | |
6600 | failures.emplace_back(op_ret, dirname.to_string()); | |
6601 | } | |
6602 | break; | |
6603 | } | |
6604 | default: { | |
6605 | /* Not recognized. Skip. */ | |
6606 | op_ret = 0; | |
6607 | break; | |
6608 | } | |
6609 | } | |
6610 | ||
6611 | /* In case of any problems with sub-request authorization Swift simply | |
6612 | * terminates whole upload immediately. */ | |
6613 | if (boost::algorithm::contains(std::initializer_list<int>{ op_ret }, | |
6614 | terminal_errors)) { | |
6615 | ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret | |
6616 | << dendl; | |
6617 | break; | |
6618 | } | |
6619 | } else { | |
6620 | ldout(s->cct, 2) << "bulk upload: an empty block" << dendl; | |
6621 | op_ret = 0; | |
6622 | } | |
6623 | ||
6624 | buffer.clear(); | |
6625 | } while (! status.eof()); | |
6626 | ||
6627 | return; | |
6628 | } | |
6629 | ||
6630 | RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter() | |
6631 | { | |
6632 | const size_t aligned_legnth = length + (-length % alignment); | |
6633 | ceph::bufferlist junk; | |
6634 | ||
6635 | DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk); | |
6636 | } | |
6637 | ||
6638 | ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want, | |
6639 | ceph::bufferlist& dst) | |
6640 | { | |
6641 | const size_t max_to_read = std::min(want, length - position); | |
6642 | const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst); | |
6643 | if (len > 0) { | |
6644 | position += len; | |
6645 | } | |
6646 | return len; | |
6647 | } | |
6648 | ||
6649 | ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want, | |
6650 | ceph::bufferlist& dst) | |
6651 | { | |
6652 | const auto len = DecoratedStreamGetter::get_exactly(want, dst); | |
6653 | if (len > 0) { | |
6654 | position += len; | |
6655 | } | |
6656 | return len; | |
6657 | } | |
6658 | ||
6659 | int RGWSetAttrs::verify_permission() | |
6660 | { | |
6661 | // This looks to be part of the RGW-NFS machinery and has no S3 or | |
6662 | // Swift equivalent. | |
6663 | bool perm; | |
6664 | if (!s->object.empty()) { | |
6665 | perm = verify_object_permission_no_policy(s, RGW_PERM_WRITE); | |
6666 | } else { | |
6667 | perm = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE); | |
6668 | } | |
6669 | if (!perm) | |
6670 | return -EACCES; | |
6671 | ||
6672 | return 0; | |
6673 | } | |
6674 | ||
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other ops in this file. */
void RGWSetAttrs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6679 | ||
6680 | void RGWSetAttrs::execute() | |
6681 | { | |
6682 | op_ret = get_params(); | |
6683 | if (op_ret < 0) | |
6684 | return; | |
6685 | ||
6686 | rgw_obj obj(s->bucket, s->object); | |
6687 | ||
6688 | if (!s->object.empty()) { | |
6689 | store->set_atomic(s->obj_ctx, obj); | |
6690 | op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr); | |
6691 | } else { | |
6692 | for (auto& iter : attrs) { | |
6693 | s->bucket_attrs[iter.first] = std::move(iter.second); | |
6694 | } | |
6695 | op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs, | |
6696 | &s->bucket_info.objv_tracker); | |
6697 | } | |
6698 | } | |
6699 | ||
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other ops in this file. */
void RGWGetObjLayout::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6704 | ||
6705 | void RGWGetObjLayout::execute() | |
6706 | { | |
6707 | rgw_obj obj(s->bucket, s->object); | |
6708 | RGWRados::Object target(store, | |
6709 | s->bucket_info, | |
6710 | *static_cast<RGWObjectCtx *>(s->obj_ctx), | |
6711 | rgw_obj(s->bucket, s->object)); | |
6712 | RGWRados::Object::Read stat_op(&target); | |
6713 | ||
6714 | op_ret = stat_op.prepare(); | |
6715 | if (op_ret < 0) { | |
6716 | return; | |
6717 | } | |
6718 | ||
6719 | head_obj = stat_op.state.head_obj; | |
6720 | ||
6721 | op_ret = target.get_manifest(&manifest); | |
6722 | } | |
6723 | ||
6724 | ||
6725 | int RGWConfigBucketMetaSearch::verify_permission() | |
6726 | { | |
6727 | if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) { | |
6728 | return -EACCES; | |
6729 | } | |
6730 | ||
6731 | return 0; | |
6732 | } | |
6733 | ||
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other ops in this file. */
void RGWConfigBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6738 | ||
6739 | void RGWConfigBucketMetaSearch::execute() | |
6740 | { | |
6741 | op_ret = get_params(); | |
6742 | if (op_ret < 0) { | |
6743 | ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl; | |
6744 | return; | |
6745 | } | |
6746 | ||
6747 | s->bucket_info.mdsearch_config = mdsearch_config; | |
6748 | ||
6749 | op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs); | |
6750 | if (op_ret < 0) { | |
6751 | ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; | |
6752 | return; | |
6753 | } | |
6754 | } | |
6755 | ||
6756 | int RGWGetBucketMetaSearch::verify_permission() | |
6757 | { | |
6758 | if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) { | |
6759 | return -EACCES; | |
6760 | } | |
6761 | ||
6762 | return 0; | |
6763 | } | |
6764 | ||
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other ops in this file. */
void RGWGetBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6769 | ||
6770 | int RGWDelBucketMetaSearch::verify_permission() | |
6771 | { | |
6772 | if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) { | |
6773 | return -EACCES; | |
6774 | } | |
6775 | ||
6776 | return 0; | |
6777 | } | |
6778 | ||
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other ops in this file. */
void RGWDelBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6783 | ||
6784 | void RGWDelBucketMetaSearch::execute() | |
6785 | { | |
6786 | s->bucket_info.mdsearch_config.clear(); | |
6787 | ||
6788 | op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs); | |
6789 | if (op_ret < 0) { | |
6790 | ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; | |
6791 | return; | |
6792 | } | |
6793 | } | |
6794 | ||
6795 | ||
/* Nothing to release: `store` and `s` are non-owning pointers assigned
 * in init(). */
RGWHandler::~RGWHandler()
{
}
6799 | ||
/* Bind the handler to its store and request state. Both pointers are
 * stored non-owning. `cio` is unused here — presumably consumed by
 * derived-class overrides; TODO confirm against subclasses. */
int RGWHandler::init(RGWRados *_store,
		     struct req_state *_s,
                     rgw::io::BasicClient *cio)
{
  store = _store;
  s = _s;

  return 0;
}
6809 | ||
6810 | int RGWHandler::do_init_permissions() | |
6811 | { | |
6812 | int ret = rgw_build_bucket_policies(store, s); | |
6813 | s->env = rgw_build_iam_environment(store, s); | |
6814 | ||
6815 | if (ret < 0) { | |
6816 | ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl; | |
6817 | if (ret == -ENODATA) | |
6818 | ret = -EACCES; | |
6819 | } | |
6820 | ||
6821 | return ret; | |
6822 | } | |
6823 | ||
6824 | int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket) | |
6825 | { | |
6826 | if (only_bucket) { | |
6827 | /* already read bucket info */ | |
6828 | return 0; | |
6829 | } | |
6830 | int ret = rgw_build_object_policies(store, s, op->prefetch_data()); | |
6831 | ||
6832 | if (ret < 0) { | |
6833 | ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":" | |
6834 | << s->object << " only_bucket=" << only_bucket | |
6835 | << " ret=" << ret << dendl; | |
6836 | if (ret == -ENODATA) | |
6837 | ret = -EACCES; | |
6838 | } | |
6839 | ||
6840 | return ret; | |
6841 | } | |
6842 | ||
/* Delegate error translation to the protocol-specific (dialect) handler. */
int RGWOp::error_handler(int err_no, string *error_content) {
  return dialect_handler->error_handler(err_no, error_content);
}
6846 | ||
/* Default implementation: pass the error code through unchanged and
 * leave error_content untouched. */
int RGWHandler::error_handler(int err_no, string *error_content) {
  // This is the do-nothing error handler
  return err_no;
}
6851 | ||
6852 | ||
6853 | void RGWPutBucketPolicy::send_response() | |
6854 | { | |
6855 | if (op_ret) { | |
6856 | set_req_state_err(s, op_ret); | |
6857 | } | |
6858 | dump_errno(s); | |
6859 | end_header(s); | |
6860 | } | |
6861 | ||
6862 | int RGWPutBucketPolicy::verify_permission() | |
6863 | { | |
6864 | if (!verify_bucket_permission(s, rgw::IAM::s3PutBucketPolicy)) { | |
6865 | return -EACCES; | |
6866 | } | |
6867 | ||
6868 | return 0; | |
6869 | } | |
6870 | ||
6871 | int RGWPutBucketPolicy::get_params() | |
6872 | { | |
6873 | const auto max_size = s->cct->_conf->rgw_max_put_param_size; | |
6874 | // At some point when I have more time I want to make a version of | |
6875 | // rgw_rest_read_all_input that doesn't use malloc. | |
6876 | op_ret = rgw_rest_read_all_input(s, &data, &len, max_size, false); | |
6877 | // And throws exceptions. | |
6878 | return op_ret; | |
6879 | } | |
6880 | ||
/* Parse and store the bucket policy document uploaded in the request
 * body. Forwards to the metadata master first when this zone is not the
 * master; malformed documents yield -EINVAL. */
void RGWPutBucketPolicy::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  /* Zero-copy view over the request body; static_from_mem does not take
   * ownership, so `data` must outlive this function's use of in_data. */
  bufferlist in_data = bufferlist::static_from_mem(data, len);

  if (!store->is_meta_master()) {
    /* Replicate the change through the metadata master before applying
     * it locally. */
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  try {
    /* Policy's constructor throws PolicyParseException on bad input. */
    const Policy p(s->cct, s->bucket_tenant, in_data);
    /* NOTE(review): retry_raced_bucket_write presumably re-runs the lambda
     * when the bucket metadata was modified concurrently — confirm against
     * the helper's definition. The attr map is copied inside the lambda so
     * each attempt starts from the current s->bucket_attrs. */
    op_ret = retry_raced_bucket_write(store, s, [&p, this] {
	auto attrs = s->bucket_attrs;
	attrs[RGW_ATTR_IAM_POLICY].clear();
	attrs[RGW_ATTR_IAM_POLICY].append(p.text);
	op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
				      &s->bucket_info.objv_tracker);
	return op_ret;
      });
  } catch (rgw::IAM::PolicyParseException& e) {
    ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
    op_ret = -EINVAL;
  }
}
6913 | ||
6914 | void RGWGetBucketPolicy::send_response() | |
6915 | { | |
6916 | if (op_ret) { | |
6917 | set_req_state_err(s, op_ret); | |
6918 | } | |
6919 | dump_errno(s); | |
6920 | end_header(s, this, "application/json"); | |
6921 | dump_body(s, policy); | |
6922 | } | |
6923 | ||
6924 | int RGWGetBucketPolicy::verify_permission() | |
6925 | { | |
6926 | if (!verify_bucket_permission(s, rgw::IAM::s3GetBucketPolicy)) { | |
6927 | return -EACCES; | |
6928 | } | |
6929 | ||
6930 | return 0; | |
6931 | } | |
6932 | ||
6933 | void RGWGetBucketPolicy::execute() | |
6934 | { | |
6935 | auto attrs = s->bucket_attrs; | |
6936 | map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY); | |
6937 | if (aiter == attrs.end()) { | |
6938 | ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr" | |
6939 | << " bucket_name = " << s->bucket_name << dendl; | |
6940 | op_ret = -ERR_NO_SUCH_BUCKET_POLICY; | |
6941 | s->err.message = "The bucket policy does not exist"; | |
6942 | return; | |
6943 | } else { | |
6944 | policy = attrs[RGW_ATTR_IAM_POLICY]; | |
6945 | ||
6946 | if (policy.length() == 0) { | |
6947 | ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl; | |
6948 | op_ret = -ERR_NO_SUCH_BUCKET_POLICY; | |
6949 | s->err.message = "The bucket policy does not exist"; | |
6950 | return; | |
6951 | } | |
6952 | } | |
6953 | } | |
6954 | ||
6955 | void RGWDeleteBucketPolicy::send_response() | |
6956 | { | |
6957 | if (op_ret) { | |
6958 | set_req_state_err(s, op_ret); | |
6959 | } | |
6960 | dump_errno(s); | |
6961 | end_header(s); | |
6962 | } | |
6963 | ||
6964 | int RGWDeleteBucketPolicy::verify_permission() | |
6965 | { | |
6966 | if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucketPolicy)) { | |
6967 | return -EACCES; | |
6968 | } | |
6969 | ||
6970 | return 0; | |
6971 | } | |
6972 | ||
/* Remove the bucket's stored IAM policy attribute.
 * NOTE(review): retry_raced_bucket_write presumably re-runs the lambda on
 * a racing bucket-metadata write — confirm against the helper's
 * definition. The attr map is copied so a failed attempt leaves
 * s->bucket_attrs untouched. */
void RGWDeleteBucketPolicy::execute()
{
  op_ret = retry_raced_bucket_write(store, s, [this] {
      auto attrs = s->bucket_attrs;
      attrs.erase(RGW_ATTR_IAM_POLICY);
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
				    &s->bucket_info.objv_tracker);
      return op_ret;
    });
}