1 // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
#pragma once

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "cache/cache_helpers.h"
#include "cache/cache_key.h"
#include "port/lang.h"
#include "rocksdb/cache.h"
#include "rocksdb/status.h"
#include "rocksdb/system_clock.h"
#include "test_util/sync_point.h"
#include "util/coding_lean.h"
22 namespace ROCKSDB_NAMESPACE
{
24 // A generic helper object for gathering stats about cache entries by
25 // iterating over them with ApplyToAllEntries. This class essentially
26 // solves the problem of slowing down a Cache with too many stats
27 // collectors that could be sharing stat results, such as from multiple
// column families or multiple DBs sharing a Cache. We employ a few
// mechanisms to make this work well:
// * Only one collector for a particular kind of Stats is alive
// for each Cache. This is guaranteed using the Cache itself to hold
// the collector.
// * A mutex ensures only one thread is gathering stats for this
// collector at any given time.
35 // * The most recent gathered stats are saved and simply copied to
36 // satisfy requests within a time window (default: 3 minutes) of
37 // completion of the most recent stat gathering.
// Template parameter Stats must be copyable and trivially constructable,
// as well as provide these member functions:
// // Notification before applying callback to all entries
43 // void BeginCollection(Cache*, SystemClock*, uint64_t start_time_micros);
44 // // Get the callback to apply to all entries. `callback`
45 // // type must be compatible with Cache::ApplyToAllEntries
46 // callback GetEntryCallback();
47 // // Notification after applying callback to all entries
48 // void EndCollection(Cache*, SystemClock*, uint64_t end_time_micros);
49 // // Notification that a collection was skipped because of
50 // // sufficiently recent saved results.
51 // void SkippedCollection();
53 template <class Stats
>
54 class CacheEntryStatsCollector
{
56 // Gather and save stats if saved stats are too old. (Use GetStats() to
59 // Maximum allowed age for a "hit" on saved results is determined by the
60 // two interval parameters. Both set to 0 forces a re-scan. For example
61 // with min_interval_seconds=300 and min_interval_factor=100, if the last
62 // scan took 10s, we would only rescan ("miss") if the age in seconds of
63 // the saved results is > max(300, 100*10).
64 // Justification: scans can vary wildly in duration, e.g. from 0.02 sec
65 // to as much as 20 seconds, so we want to be able to cap the absolute
66 // and relative frequency of scans.
67 void CollectStats(int min_interval_seconds
, int min_interval_factor
) {
68 // Waits for any pending reader or writer (collector)
69 std::lock_guard
<std::mutex
> lock(working_mutex_
);
71 uint64_t max_age_micros
=
72 static_cast<uint64_t>(std::max(min_interval_seconds
, 0)) * 1000000U;
74 if (last_end_time_micros_
> last_start_time_micros_
&&
75 min_interval_factor
> 0) {
76 max_age_micros
= std::max(
77 max_age_micros
, min_interval_factor
* (last_end_time_micros_
-
78 last_start_time_micros_
));
81 uint64_t start_time_micros
= clock_
->NowMicros();
82 if ((start_time_micros
- last_end_time_micros_
) > max_age_micros
) {
83 last_start_time_micros_
= start_time_micros
;
84 working_stats_
.BeginCollection(cache_
, clock_
, start_time_micros
);
86 cache_
->ApplyToAllEntries(working_stats_
.GetEntryCallback(), {});
87 TEST_SYNC_POINT_CALLBACK(
88 "CacheEntryStatsCollector::GetStats:AfterApplyToAllEntries", nullptr);
90 uint64_t end_time_micros
= clock_
->NowMicros();
91 last_end_time_micros_
= end_time_micros
;
92 working_stats_
.EndCollection(cache_
, clock_
, end_time_micros
);
94 working_stats_
.SkippedCollection();
97 // Save so that we don't need to wait for an outstanding collection in
98 // order to make of copy of the last saved stats
99 std::lock_guard
<std::mutex
> lock2(saved_mutex_
);
100 saved_stats_
= working_stats_
;
103 // Gets saved stats, regardless of age
104 void GetStats(Stats
*stats
) {
105 std::lock_guard
<std::mutex
> lock(saved_mutex_
);
106 *stats
= saved_stats_
;
109 Cache
*GetCache() const { return cache_
; }
111 // Gets or creates a shared instance of CacheEntryStatsCollector in the
112 // cache itself, and saves into `ptr`. This shared_ptr will hold the
113 // entry in cache until all refs are destroyed.
114 static Status
GetShared(Cache
*cache
, SystemClock
*clock
,
115 std::shared_ptr
<CacheEntryStatsCollector
> *ptr
) {
116 const Slice
&cache_key
= GetCacheKey();
118 Cache::Handle
*h
= cache
->Lookup(cache_key
);
120 // Not yet in cache, but Cache doesn't provide a built-in way to
121 // avoid racing insert. So we double-check under a shared mutex,
122 // inspired by TableCache.
123 STATIC_AVOID_DESTRUCTION(std::mutex
, static_mutex
);
124 std::lock_guard
<std::mutex
> lock(static_mutex
);
126 h
= cache
->Lookup(cache_key
);
128 auto new_ptr
= new CacheEntryStatsCollector(cache
, clock
);
129 // TODO: non-zero charge causes some tests that count block cache
130 // usage to go flaky. Fix the problem somehow so we can use an
133 Status s
= cache
->Insert(cache_key
, new_ptr
, charge
, Deleter
, &h
,
134 Cache::Priority::HIGH
);
136 assert(h
== nullptr);
142 // If we reach here, shared entry is in cache with handle `h`.
143 assert(cache
->GetDeleter(h
) == Deleter
);
145 // Build an aliasing shared_ptr that keeps `ptr` in cache while there
147 *ptr
= MakeSharedCacheHandleGuard
<CacheEntryStatsCollector
>(cache
, h
);
152 explicit CacheEntryStatsCollector(Cache
*cache
, SystemClock
*clock
)
155 last_start_time_micros_(0),
156 last_end_time_micros_(/*pessimistic*/ 10000000),
160 static void Deleter(const Slice
&, void *value
) {
161 delete static_cast<CacheEntryStatsCollector
*>(value
);
164 static const Slice
&GetCacheKey() {
165 // For each template instantiation
166 static CacheKey ckey
= CacheKey::CreateUniqueForProcessLifetime();
167 static Slice ckey_slice
= ckey
.AsSlice();
171 std::mutex saved_mutex_
;
174 std::mutex working_mutex_
;
175 Stats working_stats_
;
176 uint64_t last_start_time_micros_
;
177 uint64_t last_end_time_micros_
;
180 SystemClock
*const clock_
;
183 } // namespace ROCKSDB_NAMESPACE