]> git.proxmox.com Git - ceph.git/blob - ceph/src/rgw/rgw_quota.cc
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / rgw / rgw_quota.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 /*
5 * Ceph - scalable distributed file system
6 *
7 * Copyright (C) 2013 Inktank, Inc
8 *
9 * This is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License version 2.1, as published by the Free Software
12 * Foundation. See file COPYING.
13 *
14 */
15
16
17 #include "include/utime.h"
18 #include "common/lru_map.h"
19 #include "common/RefCountedObj.h"
20 #include "common/Thread.h"
21 #include "common/Mutex.h"
22 #include "common/RWLock.h"
23
24 #include "rgw_common.h"
25 #include "rgw_rados.h"
26 #include "rgw_quota.h"
27 #include "rgw_bucket.h"
28 #include "rgw_user.h"
29
30 #include "services/svc_sys_obj.h"
31
32 #include <atomic>
33
34 #define dout_context g_ceph_context
35 #define dout_subsys ceph_subsys_rgw
36
37
/* A single cache entry: the stats snapshot for one user or bucket plus the
 * timestamps that drive expiry and background refresh. */
struct RGWQuotaCacheStats {
  RGWStorageStats stats;
  utime_t expiration;         /* entry no longer trusted once this passes */
  utime_t async_refresh_time; /* when to start a background refresh; zeroed while one is in flight */
};
43
/* Generic LRU-backed cache of storage stats, keyed by T (rgw_bucket for the
 * bucket cache, rgw_user for the user cache). Subclasses bind the key type,
 * implement the backend fetch, and allocate async refresh handlers. */
template<class T>
class RGWQuotaCache {
protected:
  RGWRados *store;
  lru_map<T, RGWQuotaCacheStats> stats_map;
  /* counts in-flight async refreshes so the destructor can wait them out */
  RefCountedWaitObject *async_refcount;

  /* UpdateContext that atomically "claims" an entry for async refresh:
   * zeroes async_refresh_time so racing callers see the refresh as already
   * scheduled; update() returns false when someone else claimed it first.
   * NOTE: the delta/bytes members are initialized but never read here. */
  class StatsAsyncTestSet : public lru_map<T, RGWQuotaCacheStats>::UpdateContext {
    int objs_delta;
    uint64_t added_bytes;
    uint64_t removed_bytes;
  public:
    StatsAsyncTestSet() : objs_delta(0), added_bytes(0), removed_bytes(0) {}
    bool update(RGWQuotaCacheStats *entry) override {
      if (entry->async_refresh_time.sec() == 0)
        return false;

      entry->async_refresh_time = utime_t(0, 0);

      return true;
    }
  };

  /* read fresh stats for the key from the backend store */
  virtual int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats) = 0;

  /* cache-map primitives; subclasses decide whether user or bucket is the key */
  virtual bool map_find(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) = 0;

  virtual bool map_find_and_update(const rgw_user& user, const rgw_bucket& bucket, typename lru_map<T, RGWQuotaCacheStats>::UpdateContext *ctx) = 0;
  virtual void map_add(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) = 0;

  /* hook invoked after adjust_stats(); default is a no-op */
  virtual void data_modified(const rgw_user& user, rgw_bucket& bucket) {}
public:
  RGWQuotaCache(RGWRados *_store, int size) : store(_store), stats_map(size) {
    async_refcount = new RefCountedWaitObject;
  }
  virtual ~RGWQuotaCache() {
    async_refcount->put_wait(); /* wait for all pending async requests to complete */
  }

  /* return stats for the key, from cache if fresh, else from storage */
  int get_stats(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, RGWQuotaInfo& quota);
  /* apply a local delta to the cached entry after an object write/delete */
  void adjust_stats(const rgw_user& user, rgw_bucket& bucket, int objs_delta, uint64_t added_bytes, uint64_t removed_bytes);

  virtual bool can_use_cached_stats(RGWQuotaInfo& quota, RGWStorageStats& stats);

  void set_stats(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs, RGWStorageStats& stats);
  int async_refresh(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs);
  void async_refresh_response(const rgw_user& user, rgw_bucket& bucket, RGWStorageStats& stats);
  void async_refresh_fail(const rgw_user& user, rgw_bucket& bucket);

  /* base for the per-cache async fetch callbacks; concrete handlers are
   * reference counted via their *_CB base (see drop_reference()) */
  class AsyncRefreshHandler {
  protected:
    RGWRados *store;
    RGWQuotaCache<T> *cache;
  public:
    AsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<T> *_cache) : store(_store), cache(_cache) {}
    virtual ~AsyncRefreshHandler() {}

    virtual int init_fetch() = 0;
    virtual void drop_reference() = 0;
  };

  virtual AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) = 0;
};
107
108 template<class T>
109 bool RGWQuotaCache<T>::can_use_cached_stats(RGWQuotaInfo& quota, RGWStorageStats& cached_stats)
110 {
111 if (quota.max_size >= 0) {
112 if (quota.max_size_soft_threshold < 0) {
113 quota.max_size_soft_threshold = quota.max_size * store->ctx()->_conf->rgw_bucket_quota_soft_threshold;
114 }
115
116 if (cached_stats.size_rounded >= (uint64_t)quota.max_size_soft_threshold) {
117 ldout(store->ctx(), 20) << "quota: can't use cached stats, exceeded soft threshold (size): "
118 << cached_stats.size_rounded << " >= " << quota.max_size_soft_threshold << dendl;
119 return false;
120 }
121 }
122
123 if (quota.max_objects >= 0) {
124 if (quota.max_objs_soft_threshold < 0) {
125 quota.max_objs_soft_threshold = quota.max_objects * store->ctx()->_conf->rgw_bucket_quota_soft_threshold;
126 }
127
128 if (cached_stats.num_objects >= (uint64_t)quota.max_objs_soft_threshold) {
129 ldout(store->ctx(), 20) << "quota: can't use cached stats, exceeded soft threshold (num objs): "
130 << cached_stats.num_objects << " >= " << quota.max_objs_soft_threshold << dendl;
131 return false;
132 }
133 }
134
135 return true;
136 }
137
/* Start a background stats refresh for the key. Returns 0 both when a
 * refresh was started and when another caller beat us to it; a negative
 * value means the fetch could not even be submitted. */
template<class T>
int RGWQuotaCache<T>::async_refresh(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs)
{
  /* protect against multiple updates */
  StatsAsyncTestSet test_update;
  if (!map_find_and_update(user, bucket, &test_update)) {
    /* most likely we just raced with another update */
    return 0;
  }

  /* balanced by put() in async_refresh_response()/async_refresh_fail() */
  async_refcount->get();


  AsyncRefreshHandler *handler = allocate_refresh_handler(user, bucket);

  int ret = handler->init_fetch();
  if (ret < 0) {
    /* NOTE(review): some init_fetch() implementations state they already
     * dropped their own reference on failure; confirm drop_reference() here
     * does not double-release in that path. */
    async_refcount->put();
    handler->drop_reference();
    return ret;
  }

  return 0;
}
162
163 template<class T>
164 void RGWQuotaCache<T>::async_refresh_fail(const rgw_user& user, rgw_bucket& bucket)
165 {
166 ldout(store->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl;
167
168 async_refcount->put();
169 }
170
/* Success completion for an async refresh: store the fetched stats in the
 * cache and release the reference taken in async_refresh(). */
template<class T>
void RGWQuotaCache<T>::async_refresh_response(const rgw_user& user, rgw_bucket& bucket, RGWStorageStats& stats)
{
  ldout(store->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl;

  RGWQuotaCacheStats qs;

  /* re-read the current entry (if any) so set_stats() updates it in place;
   * a miss leaves qs default-constructed, which is fine since set_stats()
   * overwrites every field it arms */
  map_find(user, bucket, qs);

  set_stats(user, bucket, qs, stats);

  async_refcount->put();
}
184
185 template<class T>
186 void RGWQuotaCache<T>::set_stats(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs, RGWStorageStats& stats)
187 {
188 qs.stats = stats;
189 qs.expiration = ceph_clock_now();
190 qs.async_refresh_time = qs.expiration;
191 qs.expiration += store->ctx()->_conf->rgw_bucket_quota_ttl;
192 qs.async_refresh_time += store->ctx()->_conf->rgw_bucket_quota_ttl / 2;
193
194 map_add(user, bucket, qs);
195 }
196
/* Return stats for the key. Serves from cache when the entry is unexpired
 * and still usable per can_use_cached_stats(); otherwise falls through to a
 * synchronous backend fetch and re-caches the result. May also kick off an
 * opportunistic async refresh when the entry is past its half-life. */
template<class T>
int RGWQuotaCache<T>::get_stats(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, RGWQuotaInfo& quota) {
  RGWQuotaCacheStats qs;
  utime_t now = ceph_clock_now();
  if (map_find(user, bucket, qs)) {
    /* async_refresh_time == 0 means a refresh is already in flight */
    if (qs.async_refresh_time.sec() > 0 && now >= qs.async_refresh_time) {
      int r = async_refresh(user, bucket, qs);
      if (r < 0) {
        ldout(store->ctx(), 0) << "ERROR: quota async refresh returned ret=" << r << dendl;

        /* continue processing, might be a transient error, async refresh is just optimization */
      }
    }

    if (can_use_cached_stats(quota, qs.stats) && qs.expiration >
	ceph_clock_now()) {
      stats = qs.stats;
      return 0;
    }
  }

  /* -ENOENT is tolerated: the entity may simply have no stats recorded yet.
   * NOTE(review): in that case `stats` is cached as the caller passed it in —
   * presumably default-constructed to zeros; confirm against callers. */
  int ret = fetch_stats_from_storage(user, bucket, stats);
  if (ret < 0 && ret != -ENOENT)
    return ret;

  set_stats(user, bucket, qs, stats);

  return 0;
}
226
227
/* UpdateContext that applies a local usage delta (object count, bytes added,
 * bytes removed) to a cached entry in place. Each counter is clamped at zero
 * rather than allowed to wrap when the delta would drive it negative. */
template<class T>
class RGWQuotaStatsUpdate : public lru_map<T, RGWQuotaCacheStats>::UpdateContext {
  const int objs_delta;
  const uint64_t added_bytes;
  const uint64_t removed_bytes;
public:
  RGWQuotaStatsUpdate(const int objs_delta,
                      const uint64_t added_bytes,
                      const uint64_t removed_bytes)
    : objs_delta(objs_delta),
      added_bytes(added_bytes),
      removed_bytes(removed_bytes) {
  }

  bool update(RGWQuotaCacheStats * const entry) override {
    /* size_rounded tracks per-object allocation-unit rounding, so the deltas
     * must be rounded the same way before being applied */
    const uint64_t rounded_added = rgw_rounded_objsize(added_bytes);
    const uint64_t rounded_removed = rgw_rounded_objsize(removed_bytes);

    /* the signed cast detects underflow of the unsigned arithmetic; clamp to
     * zero instead of wrapping */
    if (((int64_t)(entry->stats.size + added_bytes - removed_bytes)) >= 0) {
      entry->stats.size += added_bytes - removed_bytes;
    } else {
      entry->stats.size = 0;
    }

    if (((int64_t)(entry->stats.size_rounded + rounded_added - rounded_removed)) >= 0) {
      entry->stats.size_rounded += rounded_added - rounded_removed;
    } else {
      entry->stats.size_rounded = 0;
    }

    if (((int64_t)(entry->stats.num_objects + objs_delta)) >= 0) {
      entry->stats.num_objects += objs_delta;
    } else {
      entry->stats.num_objects = 0;
    }

    return true;
  }
};
267
268
/* Apply a local usage delta to the cached entry (if present) after an object
 * write or delete, then notify the subclass via data_modified() — the user
 * cache uses that hook to queue the bucket for a later backend sync. */
template<class T>
void RGWQuotaCache<T>::adjust_stats(const rgw_user& user, rgw_bucket& bucket, int objs_delta,
                                    uint64_t added_bytes, uint64_t removed_bytes)
{
  RGWQuotaStatsUpdate<T> update(objs_delta, added_bytes, removed_bytes);
  map_find_and_update(user, bucket, &update);

  data_modified(user, bucket);
}
278
/* Async refresh handler for the bucket stats cache. The RGWGetBucketStats_CB
 * base supplies the bucket, the per-category stats map, and the refcounting
 * behind drop_reference(). */
class BucketAsyncRefreshHandler : public RGWQuotaCache<rgw_bucket>::AsyncRefreshHandler,
                                  public RGWGetBucketStats_CB {
  rgw_user user;
public:
  BucketAsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<rgw_bucket> *_cache,
                            const rgw_user& _user, const rgw_bucket& _bucket) :
    RGWQuotaCache<rgw_bucket>::AsyncRefreshHandler(_store, _cache),
    RGWGetBucketStats_CB(_bucket), user(_user) {}

  void drop_reference() override { put(); }
  void handle_response(int r) override;
  int init_fetch() override;
};
292
293 int BucketAsyncRefreshHandler::init_fetch()
294 {
295 RGWBucketInfo bucket_info;
296
297 auto obj_ctx = store->svc.sysobj->init_obj_ctx();
298
299 int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL);
300 if (r < 0) {
301 ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
302 return r;
303 }
304
305 ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl;
306
307 r = store->get_bucket_stats_async(bucket_info, RGW_NO_SHARD, this);
308 if (r < 0) {
309 ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl;
310
311 /* get_bucket_stats_async() dropped our reference already */
312 return r;
313 }
314
315 return 0;
316 }
317
318 void BucketAsyncRefreshHandler::handle_response(const int r)
319 {
320 if (r < 0) {
321 ldout(store->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl;
322 cache->async_refresh_fail(user, bucket);
323 return;
324 }
325
326 RGWStorageStats bs;
327
328 for (const auto& pair : *stats) {
329 const RGWStorageStats& s = pair.second;
330
331 bs.size += s.size;
332 bs.size_rounded += s.size_rounded;
333 bs.num_objects += s.num_objects;
334 }
335
336 cache->async_refresh_response(user, bucket, bs);
337 }
338
/* Quota cache keyed by bucket: the user argument of the map primitives is
 * ignored and the bucket is used as the LRU key. */
class RGWBucketStatsCache : public RGWQuotaCache<rgw_bucket> {
protected:
  bool map_find(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
    return stats_map.find(bucket, qs);
  }

  bool map_find_and_update(const rgw_user& user, const rgw_bucket& bucket, lru_map<rgw_bucket, RGWQuotaCacheStats>::UpdateContext *ctx) override {
    return stats_map.find_and_update(bucket, NULL, ctx);
  }

  void map_add(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
    stats_map.add(bucket, qs);
  }

  int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats) override;

public:
  explicit RGWBucketStatsCache(RGWRados *_store) : RGWQuotaCache<rgw_bucket>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) {
  }

  AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override {
    return new BucketAsyncRefreshHandler(store, this, user, bucket);
  }
};
363
364 int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats)
365 {
366 RGWBucketInfo bucket_info;
367
368 RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
369
370 int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL);
371 if (r < 0) {
372 ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
373 return r;
374 }
375
376 string bucket_ver;
377 string master_ver;
378
379 map<RGWObjCategory, RGWStorageStats> bucket_stats;
380 r = store->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver,
381 &master_ver, bucket_stats, nullptr);
382 if (r < 0) {
383 ldout(store->ctx(), 0) << "could not get bucket stats for bucket="
384 << bucket.name << dendl;
385 return r;
386 }
387
388 stats = RGWStorageStats();
389
390 for (const auto& pair : bucket_stats) {
391 const RGWStorageStats& s = pair.second;
392
393 stats.size += s.size;
394 stats.size_rounded += s.size_rounded;
395 stats.num_objects += s.num_objects;
396 }
397
398 return 0;
399 }
400
/* Async refresh handler for the user stats cache. The RGWGetUserStats_CB
 * base supplies the user, the fetched stats, and the refcounting behind
 * drop_reference(); the bucket is carried along for the cache callbacks. */
class UserAsyncRefreshHandler : public RGWQuotaCache<rgw_user>::AsyncRefreshHandler,
                                public RGWGetUserStats_CB {
  rgw_bucket bucket;
public:
  UserAsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<rgw_user> *_cache,
                          const rgw_user& _user, const rgw_bucket& _bucket) :
    RGWQuotaCache<rgw_user>::AsyncRefreshHandler(_store, _cache),
    RGWGetUserStats_CB(_user),
    bucket(_bucket) {}

  void drop_reference() override { put(); }
  int init_fetch() override;
  void handle_response(int r) override;
};
415
416 int UserAsyncRefreshHandler::init_fetch()
417 {
418 ldout(store->ctx(), 20) << "initiating async quota refresh for user=" << user << dendl;
419 int r = store->get_user_stats_async(user, this);
420 if (r < 0) {
421 ldout(store->ctx(), 0) << "could not get bucket info for user=" << user << dendl;
422
423 /* get_bucket_stats_async() dropped our reference already */
424 return r;
425 }
426
427 return 0;
428 }
429
/* Completion callback for the async user-stats fetch: forward the result
 * (the `stats` member — presumably populated by the RGWGetUserStats_CB
 * base; confirm against its declaration) to the cache, or report failure. */
void UserAsyncRefreshHandler::handle_response(int r)
{
  if (r < 0) {
    ldout(store->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl;
    cache->async_refresh_fail(user, bucket);
    return;
  }

  cache->async_refresh_response(user, bucket, stats);
}
440
/* Quota cache keyed by user. Besides caching, it owns two optional worker
 * threads (created only when quota_threads is set): BucketsSyncThread flushes
 * recently modified buckets into the owning users' stats headers, and
 * UserSyncThread periodically runs a full stats sync over all users.
 * `rwlock` guards `modified_buckets` and the buckets_sync_thread pointer
 * during shutdown; `down_flag` signals both threads to exit. */
class RGWUserStatsCache : public RGWQuotaCache<rgw_user> {
  std::atomic<bool> down_flag = { false };
  RWLock rwlock;
  /* buckets touched since the last flush, with the user that touched them */
  map<rgw_bucket, rgw_user> modified_buckets;

  /* thread, sync recent modified buckets info */
  class BucketsSyncThread : public Thread {
    CephContext *cct;
    RGWUserStatsCache *stats;

    Mutex lock;   /* protects cond for the interruptible sleep below */
    Cond cond;
  public:

    BucketsSyncThread(CephContext *_cct, RGWUserStatsCache *_s) : cct(_cct), stats(_s), lock("RGWUserStatsCache::BucketsSyncThread") {}

    void *entry() override {
      ldout(cct, 20) << "BucketsSyncThread: start" << dendl;
      do {
        map<rgw_bucket, rgw_user> buckets;

        /* grab the pending set atomically; new modifications accumulate in a
         * fresh map while we work through this batch */
        stats->swap_modified_buckets(buckets);

        for (map<rgw_bucket, rgw_user>::iterator iter = buckets.begin(); iter != buckets.end(); ++iter) {
          rgw_bucket bucket = iter->first;
          rgw_user& user = iter->second;
          ldout(cct, 20) << "BucketsSyncThread: sync user=" << user << " bucket=" << bucket << dendl;
          int r = stats->sync_bucket(user, bucket);
          if (r < 0) {
            ldout(cct, 0) << "WARNING: sync_bucket() returned r=" << r << dendl;
          }
        }

        if (stats->going_down())
          break;

        /* sleep until the next interval, or until stop() signals us */
        lock.Lock();
        cond.WaitInterval(lock, utime_t(cct->_conf->rgw_user_quota_bucket_sync_interval, 0));
        lock.Unlock();
      } while (!stats->going_down());
      ldout(cct, 20) << "BucketsSyncThread: done" << dendl;

      return NULL;
    }

    void stop() {
      Mutex::Locker l(lock);
      cond.Signal();
    }
  };

  /*
   * thread, full sync all users stats periodically
   *
   * only sync non idle users or ones that never got synced before, this is needed so that
   * users that didn't have quota turned on before (or existed before the user objclass
   * tracked stats) need to get their backend stats up to date.
   */
  class UserSyncThread : public Thread {
    CephContext *cct;
    RGWUserStatsCache *stats;

    Mutex lock;   /* protects cond for the interruptible sleep below */
    Cond cond;
  public:

    UserSyncThread(CephContext *_cct, RGWUserStatsCache *_s) : cct(_cct), stats(_s), lock("RGWUserStatsCache::UserSyncThread") {}

    void *entry() override {
      ldout(cct, 20) << "UserSyncThread: start" << dendl;
      do {
        int ret = stats->sync_all_users();
        if (ret < 0) {
          ldout(cct, 5) << "ERROR: sync_all_users() returned ret=" << ret << dendl;
        }

        if (stats->going_down())
          break;

        lock.Lock();
        cond.WaitInterval(lock, utime_t(cct->_conf->rgw_user_quota_sync_interval, 0));
        lock.Unlock();
      } while (!stats->going_down());
      ldout(cct, 20) << "UserSyncThread: done" << dendl;

      return NULL;
    }

    void stop() {
      Mutex::Locker l(lock);
      cond.Signal();
    }
  };

  BucketsSyncThread *buckets_sync_thread;
  UserSyncThread *user_sync_thread;
protected:
  /* the user is the LRU key; the bucket argument is ignored here */
  bool map_find(const rgw_user& user,const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
    return stats_map.find(user, qs);
  }

  bool map_find_and_update(const rgw_user& user, const rgw_bucket& bucket, lru_map<rgw_user, RGWQuotaCacheStats>::UpdateContext *ctx) override {
    return stats_map.find_and_update(user, NULL, ctx);
  }

  void map_add(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
    stats_map.add(user, qs);
  }

  int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats) override;
  int sync_bucket(const rgw_user& rgw_user, rgw_bucket& bucket);
  int sync_user(const rgw_user& user);
  int sync_all_users();

  void data_modified(const rgw_user& user, rgw_bucket& bucket) override;

  /* hand the pending modified-bucket set to the sync thread, leaving an
   * empty map behind for subsequent data_modified() calls */
  void swap_modified_buckets(map<rgw_bucket, rgw_user>& out) {
    rwlock.get_write();
    modified_buckets.swap(out);
    rwlock.unlock();
  }

  template<class T> /* easier doing it as a template, Thread doesn't have ->stop() */
  void stop_thread(T **pthr) {
    T *thread = *pthr;
    if (!thread)
      return;

    thread->stop();
    thread->join();
    delete thread;
    *pthr = NULL;
  }

public:
  /* NOTE(review): sized by rgw_bucket_quota_cache_size (not a user-specific
   * option) — confirm whether that is intentional */
  RGWUserStatsCache(RGWRados *_store, bool quota_threads) : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size),
                                rwlock("RGWUserStatsCache::rwlock") {
    if (quota_threads) {
      buckets_sync_thread = new BucketsSyncThread(store->ctx(), this);
      buckets_sync_thread->create("rgw_buck_st_syn");
      user_sync_thread = new UserSyncThread(store->ctx(), this);
      user_sync_thread->create("rgw_user_st_syn");
    } else {
      buckets_sync_thread = NULL;
      user_sync_thread = NULL;
    }
  }
  ~RGWUserStatsCache() override {
    stop();
  }

  AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override {
    return new UserAsyncRefreshHandler(store, this, user, bucket);
  }

  bool can_use_cached_stats(RGWQuotaInfo& quota, RGWStorageStats& stats) override {
    /* in the user case, the cached stats may contain a better estimation of the totals, as
     * the backend is only periodically getting updated.
     */
    return true;
  }

  bool going_down() {
    return down_flag;
  }

  void stop() {
    down_flag = true;
    /* take the write lock so the bucket sync thread is not mid-swap while
     * being torn down */
    rwlock.get_write();
    stop_thread(&buckets_sync_thread);
    rwlock.unlock();
    stop_thread(&user_sync_thread);
  }
};
615
616 int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats)
617 {
618 int r = store->get_user_stats(user, stats);
619 if (r < 0) {
620 ldout(store->ctx(), 0) << "could not get user stats for user=" << user << dendl;
621 return r;
622 }
623
624 return 0;
625 }
626
/* Flush one bucket's stats into its owner's user-stats header: re-read the
 * bucket instance info, then delegate to rgw_bucket_sync_user_stats(). */
int RGWUserStatsCache::sync_bucket(const rgw_user& user, rgw_bucket& bucket)
{
  RGWBucketInfo bucket_info;

  RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();

  int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL);
  if (r < 0) {
    ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
    return r;
  }

  r = rgw_bucket_sync_user_stats(store, user, bucket_info);
  if (r < 0) {
    ldout(store->ctx(), 0) << "ERROR: rgw_bucket_sync_user_stats() for user=" << user << ", bucket=" << bucket << " returned " << r << dendl;
    return r;
  }

  return 0;
}
647
/* Run a full stats sync for a single user. Users that look idle (no stats
 * update since their last sync) are skipped unless
 * rgw_user_quota_sync_idle_users is enabled. */
int RGWUserStatsCache::sync_user(const rgw_user& user)
{
  cls_user_header header;
  string user_str = user.to_str();
  int ret = store->cls_user_get_header(user_str, &header);
  if (ret < 0) {
    ldout(store->ctx(), 5) << "ERROR: can't read user header: ret=" << ret << dendl;
    return ret;
  }

  if (!store->ctx()->_conf->rgw_user_quota_sync_idle_users &&
      header.last_stats_update < header.last_stats_sync) {
    ldout(store->ctx(), 20) << "user is idle, not doing a full sync (user=" << user << ")" << dendl;
    return 0;
  }

  real_time when_need_full_sync = header.last_stats_sync;
  when_need_full_sync += make_timespan(store->ctx()->_conf->rgw_user_quota_sync_wait_time);

  // check if enough time passed since last full sync
  /* FIXME: missing check? */
  /* NOTE(review): when_need_full_sync is computed but never consulted, so
   * rgw_user_quota_sync_wait_time is not actually enforced here — confirm
   * whether the comparison was intentionally dropped before "fixing" it. */

  ret = rgw_user_sync_all_stats(store, user);
  if (ret < 0) {
    ldout(store->ctx(), 0) << "ERROR: failed user stats sync, ret=" << ret << dendl;
    return ret;
  }

  return 0;
}
678
679 int RGWUserStatsCache::sync_all_users()
680 {
681 string key = "user";
682 void *handle;
683
684 int ret = store->meta_mgr->list_keys_init(key, &handle);
685 if (ret < 0) {
686 ldout(store->ctx(), 10) << "ERROR: can't get key: ret=" << ret << dendl;
687 return ret;
688 }
689
690 bool truncated;
691 int max = 1000;
692
693 do {
694 list<string> keys;
695 ret = store->meta_mgr->list_keys_next(handle, max, keys, &truncated);
696 if (ret < 0) {
697 ldout(store->ctx(), 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
698 goto done;
699 }
700 for (list<string>::iterator iter = keys.begin();
701 iter != keys.end() && !going_down();
702 ++iter) {
703 rgw_user user(*iter);
704 ldout(store->ctx(), 20) << "RGWUserStatsCache: sync user=" << user << dendl;
705 int ret = sync_user(user);
706 if (ret < 0) {
707 ldout(store->ctx(), 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl;
708
709 /* continuing to next user */
710 continue;
711 }
712 }
713 } while (truncated);
714
715 ret = 0;
716 done:
717 store->meta_mgr->list_keys_complete(handle);
718 return ret;
719 }
720
/* Record that a bucket was modified so BucketsSyncThread will flush it into
 * the user's stats header on its next pass. The check-then-insert is done
 * under separate read/write locks, so two racers may both insert — the map
 * just ends up with the single entry either way. */
void RGWUserStatsCache::data_modified(const rgw_user& user, rgw_bucket& bucket)
{
  /* racy, but it's ok */
  rwlock.get_read();
  bool need_update = modified_buckets.find(bucket) == modified_buckets.end();
  rwlock.unlock();

  if (need_update) {
    rwlock.get_write();
    modified_buckets[bucket] = user;
    rwlock.unlock();
  }
}
734
735
/* Strategy interface deciding whether an operation of a given size/object
 * count would push usage over a quota. `entity` ("bucket"/"user") is only
 * used to label log messages. Implementations are stateless singletons
 * returned by get_instance(). */
class RGWQuotaInfoApplier {
  /* NOTE: no non-static field allowed as instances are supposed to live in
   * the static memory only. */
protected:
  RGWQuotaInfoApplier() = default;

public:
  virtual ~RGWQuotaInfoApplier() {}

  virtual bool is_size_exceeded(const char * const entity,
                                const RGWQuotaInfo& qinfo,
                                const RGWStorageStats& stats,
                                const uint64_t size) const = 0;

  virtual bool is_num_objs_exceeded(const char * const entity,
                                    const RGWQuotaInfo& qinfo,
                                    const RGWStorageStats& stats,
                                    const uint64_t num_objs) const = 0;

  /* select the rounded-size or raw-size applier based on qinfo.check_on_raw */
  static const RGWQuotaInfoApplier& get_instance(const RGWQuotaInfo& qinfo);
};
757
/* Default applier: size accounting uses allocation-unit-rounded byte counts
 * (stats.size_rounded / rgw_rounded_objsize). */
class RGWQuotaInfoDefApplier : public RGWQuotaInfoApplier {
public:
  bool is_size_exceeded(const char * const entity,
                        const RGWQuotaInfo& qinfo,
                        const RGWStorageStats& stats,
                        const uint64_t size) const override;

  bool is_num_objs_exceeded(const char * const entity,
                            const RGWQuotaInfo& qinfo,
                            const RGWStorageStats& stats,
                            const uint64_t num_objs) const override;
};
770
/* Raw applier: size accounting uses exact byte counts (stats.size), with no
 * per-object rounding. */
class RGWQuotaInfoRawApplier : public RGWQuotaInfoApplier {
public:
  bool is_size_exceeded(const char * const entity,
                        const RGWQuotaInfo& qinfo,
                        const RGWStorageStats& stats,
                        const uint64_t size) const override;

  bool is_num_objs_exceeded(const char * const entity,
                            const RGWQuotaInfo& qinfo,
                            const RGWStorageStats& stats,
                            const uint64_t num_objs) const override;
};
783
784
785 bool RGWQuotaInfoDefApplier::is_size_exceeded(const char * const entity,
786 const RGWQuotaInfo& qinfo,
787 const RGWStorageStats& stats,
788 const uint64_t size) const
789 {
790 if (qinfo.max_size < 0) {
791 /* The limit is not enabled. */
792 return false;
793 }
794
795 const uint64_t cur_size = stats.size_rounded;
796 const uint64_t new_size = rgw_rounded_objsize(size);
797
798 if (cur_size + new_size > static_cast<uint64_t>(qinfo.max_size)) {
799 dout(10) << "quota exceeded: stats.size_rounded=" << stats.size_rounded
800 << " size=" << new_size << " "
801 << entity << "_quota.max_size=" << qinfo.max_size << dendl;
802 return true;
803 }
804
805 return false;
806 }
807
808 bool RGWQuotaInfoDefApplier::is_num_objs_exceeded(const char * const entity,
809 const RGWQuotaInfo& qinfo,
810 const RGWStorageStats& stats,
811 const uint64_t num_objs) const
812 {
813 if (qinfo.max_objects < 0) {
814 /* The limit is not enabled. */
815 return false;
816 }
817
818 if (stats.num_objects + num_objs > static_cast<uint64_t>(qinfo.max_objects)) {
819 dout(10) << "quota exceeded: stats.num_objects=" << stats.num_objects
820 << " " << entity << "_quota.max_objects=" << qinfo.max_objects
821 << dendl;
822 return true;
823 }
824
825 return false;
826 }
827
828 bool RGWQuotaInfoRawApplier::is_size_exceeded(const char * const entity,
829 const RGWQuotaInfo& qinfo,
830 const RGWStorageStats& stats,
831 const uint64_t size) const
832 {
833 if (qinfo.max_size < 0) {
834 /* The limit is not enabled. */
835 return false;
836 }
837
838 const uint64_t cur_size = stats.size;
839
840 if (cur_size + size > static_cast<uint64_t>(qinfo.max_size)) {
841 dout(10) << "quota exceeded: stats.size=" << stats.size
842 << " size=" << size << " "
843 << entity << "_quota.max_size=" << qinfo.max_size << dendl;
844 return true;
845 }
846
847 return false;
848 }
849
850 bool RGWQuotaInfoRawApplier::is_num_objs_exceeded(const char * const entity,
851 const RGWQuotaInfo& qinfo,
852 const RGWStorageStats& stats,
853 const uint64_t num_objs) const
854 {
855 if (qinfo.max_objects < 0) {
856 /* The limit is not enabled. */
857 return false;
858 }
859
860 if (stats.num_objects + num_objs > static_cast<uint64_t>(qinfo.max_objects)) {
861 dout(10) << "quota exceeded: stats.num_objects=" << stats.num_objects
862 << " " << entity << "_quota.max_objects=" << qinfo.max_objects
863 << dendl;
864 return true;
865 }
866
867 return false;
868 }
869
870 const RGWQuotaInfoApplier& RGWQuotaInfoApplier::get_instance(
871 const RGWQuotaInfo& qinfo)
872 {
873 static RGWQuotaInfoDefApplier default_qapplier;
874 static RGWQuotaInfoRawApplier raw_qapplier;
875
876 if (qinfo.check_on_raw) {
877 return raw_qapplier;
878 } else {
879 return default_qapplier;
880 }
881 }
882
883
/* Concrete quota handler: owns the bucket- and user-level stats caches and
 * implements the public check/update entry points used by the request path. */
class RGWQuotaHandlerImpl : public RGWQuotaHandler {
  RGWRados *store;
  RGWBucketStatsCache bucket_stats_cache;
  RGWUserStatsCache user_stats_cache;

  /* check one quota (bucket or user) against current stats plus the pending
   * operation; returns 0 or -ERR_QUOTA_EXCEEDED */
  int check_quota(const char * const entity,
                  const RGWQuotaInfo& quota,
                  const RGWStorageStats& stats,
                  const uint64_t num_objs,
                  const uint64_t size) {
    if (!quota.enabled) {
      return 0;
    }

    const auto& quota_applier = RGWQuotaInfoApplier::get_instance(quota);

    ldout(store->ctx(), 20) << entity
                            << " quota: max_objects=" << quota.max_objects
                            << " max_size=" << quota.max_size << dendl;


    if (quota_applier.is_num_objs_exceeded(entity, quota, stats, num_objs)) {
      return -ERR_QUOTA_EXCEEDED;
    }

    if (quota_applier.is_size_exceeded(entity, quota, stats, size)) {
      return -ERR_QUOTA_EXCEEDED;
    }

    ldout(store->ctx(), 20) << entity << " quota OK:"
                            << " stats.num_objects=" << stats.num_objects
                            << " stats.size=" << stats.size << dendl;
    return 0;
  }
public:
  RGWQuotaHandlerImpl(RGWRados *_store, bool quota_threads) : store(_store),
                                    bucket_stats_cache(_store),
                                    user_stats_cache(_store, quota_threads) {}

  /* check both bucket and user quota for an operation adding num_objs
   * objects and size bytes */
  int check_quota(const rgw_user& user,
                  rgw_bucket& bucket,
                  RGWQuotaInfo& user_quota,
                  RGWQuotaInfo& bucket_quota,
                  uint64_t num_objs,
                  uint64_t size) override {

    if (!bucket_quota.enabled && !user_quota.enabled) {
      return 0;
    }

    /*
     * we need to fetch bucket stats if the user quota is enabled, because
     * the whole system relies on us periodically updating the user's bucket
     * stats in the user's header, this happens in get_stats() if we actually
     * fetch that info and not rely on cached data
     */

    if (bucket_quota.enabled) {
      RGWStorageStats bucket_stats;
      int ret = bucket_stats_cache.get_stats(user, bucket, bucket_stats,
                                             bucket_quota);
      if (ret < 0) {
        return ret;
      }
      ret = check_quota("bucket", bucket_quota, bucket_stats, num_objs, size);
      if (ret < 0) {
        return ret;
      }
    }

    if (user_quota.enabled) {
      RGWStorageStats user_stats;
      int ret = user_stats_cache.get_stats(user, bucket, user_stats, user_quota);
      if (ret < 0) {
        return ret;
      }
      ret = check_quota("user", user_quota, user_stats, num_objs, size);
      if (ret < 0) {
        return ret;
      }
    }
    return 0;
  }

  /* propagate a local usage delta into both caches after a write/delete */
  void update_stats(const rgw_user& user, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) override {
    bucket_stats_cache.adjust_stats(user, bucket, obj_delta, added_bytes, removed_bytes);
    user_stats_cache.adjust_stats(user, bucket, obj_delta, added_bytes, removed_bytes);
  }

  /* flag the bucket for resharding when the projected object count exceeds
   * num_shards * max_objs_per_shard; suggests a shard count sized for ~2x
   * the projected object count */
  int check_bucket_shards(uint64_t max_objs_per_shard, uint64_t num_shards,
                          const rgw_user& user, const rgw_bucket& bucket, RGWQuotaInfo& bucket_quota,
                          uint64_t num_objs, bool& need_resharding, uint32_t *suggested_num_shards) override
  {
    RGWStorageStats bucket_stats;
    int ret = bucket_stats_cache.get_stats(user, bucket, bucket_stats,
                                           bucket_quota);
    if (ret < 0) {
      return ret;
    }

    if (bucket_stats.num_objects + num_objs > num_shards * max_objs_per_shard) {
      ldout(store->ctx(), 0) << __func__ << ": resharding needed: stats.num_objects=" << bucket_stats.num_objects
                             << " shard max_objects=" << max_objs_per_shard * num_shards << dendl;
      need_resharding = true;
      if (suggested_num_shards) {
        *suggested_num_shards = (bucket_stats.num_objects + num_objs) * 2 / max_objs_per_shard;
      }
    } else {
      need_resharding = false;
    }

    return 0;
  }

};
999
1000
/* Factory for the quota handler; quota_threads controls whether the user
 * stats cache starts its background sync threads. Caller releases the
 * handler via free_handler(). */
RGWQuotaHandler *RGWQuotaHandler::generate_handler(RGWRados *store, bool quota_threads)
{
  return new RGWQuotaHandlerImpl(store, quota_threads);
}
1005
/* Destroy a handler created by generate_handler(); the caches' destructors
 * stop any sync threads and wait for pending async refreshes. */
void RGWQuotaHandler::free_handler(RGWQuotaHandler *handler)
{
  delete handler;
}
1010
1011
1012 void rgw_apply_default_bucket_quota(RGWQuotaInfo& quota, const ConfigProxy& conf)
1013 {
1014 if (conf->rgw_bucket_default_quota_max_objects >= 0) {
1015 quota.max_objects = conf->rgw_bucket_default_quota_max_objects;
1016 quota.enabled = true;
1017 }
1018 if (conf->rgw_bucket_default_quota_max_size >= 0) {
1019 quota.max_size = conf->rgw_bucket_default_quota_max_size;
1020 quota.enabled = true;
1021 }
1022 }
1023
1024 void rgw_apply_default_user_quota(RGWQuotaInfo& quota, const ConfigProxy& conf)
1025 {
1026 if (conf->rgw_user_default_quota_max_objects >= 0) {
1027 quota.max_objects = conf->rgw_user_default_quota_max_objects;
1028 quota.enabled = true;
1029 }
1030 if (conf->rgw_user_default_quota_max_size >= 0) {
1031 quota.max_size = conf->rgw_user_default_quota_max_size;
1032 quota.enabled = true;
1033 }
1034 }