// NOTE(review): removed stray git-blame/export residue that was not valid C++.
1 | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | // This source code is licensed under both the GPLv2 (found in the | |
3 | // COPYING file in the root directory) and Apache 2.0 License | |
4 | // (found in the LICENSE.Apache file in the root directory). | |
5 | // | |
6 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |
7 | // Use of this source code is governed by a BSD-style license that can be | |
8 | // found in the LICENSE file. See the AUTHORS file for names of contributors. | |
9 | #include "cache/cache_reservation_manager.h" | |
10 | ||
11 | #include <cassert> | |
12 | #include <cstddef> | |
13 | #include <cstring> | |
14 | #include <memory> | |
15 | ||
16 | #include "cache/cache_entry_roles.h" | |
17 | #include "rocksdb/cache.h" | |
18 | #include "rocksdb/slice.h" | |
19 | #include "rocksdb/status.h" | |
20 | #include "table/block_based/reader_common.h" | |
21 | #include "util/coding.h" | |
22 | ||
23 | namespace ROCKSDB_NAMESPACE { | |
24 | ||
25 | template <CacheEntryRole R> | |
26 | CacheReservationManagerImpl<R>::CacheReservationHandle::CacheReservationHandle( | |
27 | std::size_t incremental_memory_used, | |
28 | std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr) | |
29 | : incremental_memory_used_(incremental_memory_used) { | |
30 | assert(cache_res_mgr); | |
31 | cache_res_mgr_ = cache_res_mgr; | |
32 | } | |
33 | ||
34 | template <CacheEntryRole R> | |
35 | CacheReservationManagerImpl< | |
36 | R>::CacheReservationHandle::~CacheReservationHandle() { | |
37 | Status s = cache_res_mgr_->ReleaseCacheReservation(incremental_memory_used_); | |
38 | s.PermitUncheckedError(); | |
39 | } | |
40 | ||
41 | template <CacheEntryRole R> | |
42 | CacheReservationManagerImpl<R>::CacheReservationManagerImpl( | |
43 | std::shared_ptr<Cache> cache, bool delayed_decrease) | |
44 | : delayed_decrease_(delayed_decrease), | |
45 | cache_allocated_size_(0), | |
46 | memory_used_(0) { | |
47 | assert(cache != nullptr); | |
48 | cache_ = cache; | |
49 | } | |
50 | ||
51 | template <CacheEntryRole R> | |
52 | CacheReservationManagerImpl<R>::~CacheReservationManagerImpl() { | |
53 | for (auto* handle : dummy_handles_) { | |
54 | cache_->Release(handle, true); | |
55 | } | |
56 | } | |
57 | ||
58 | template <CacheEntryRole R> | |
59 | Status CacheReservationManagerImpl<R>::UpdateCacheReservation( | |
60 | std::size_t new_mem_used) { | |
61 | memory_used_ = new_mem_used; | |
62 | std::size_t cur_cache_allocated_size = | |
63 | cache_allocated_size_.load(std::memory_order_relaxed); | |
64 | if (new_mem_used == cur_cache_allocated_size) { | |
65 | return Status::OK(); | |
66 | } else if (new_mem_used > cur_cache_allocated_size) { | |
67 | Status s = IncreaseCacheReservation(new_mem_used); | |
68 | return s; | |
69 | } else { | |
70 | // In delayed decrease mode, we don't decrease cache reservation | |
71 | // untill the memory usage is less than 3/4 of what we reserve | |
72 | // in the cache. | |
73 | // We do this because | |
74 | // (1) Dummy entry insertion is expensive in block cache | |
75 | // (2) Delayed releasing previously inserted dummy entries can save such | |
76 | // expensive dummy entry insertion on memory increase in the near future, | |
77 | // which is likely to happen when the memory usage is greater than or equal | |
78 | // to 3/4 of what we reserve | |
79 | if (delayed_decrease_ && new_mem_used >= cur_cache_allocated_size / 4 * 3) { | |
80 | return Status::OK(); | |
81 | } else { | |
82 | Status s = DecreaseCacheReservation(new_mem_used); | |
83 | return s; | |
84 | } | |
85 | } | |
86 | } | |
87 | ||
88 | template <CacheEntryRole R> | |
89 | Status CacheReservationManagerImpl<R>::MakeCacheReservation( | |
90 | std::size_t incremental_memory_used, | |
91 | std::unique_ptr<CacheReservationManager::CacheReservationHandle>* handle) { | |
92 | assert(handle); | |
93 | Status s = | |
94 | UpdateCacheReservation(GetTotalMemoryUsed() + incremental_memory_used); | |
95 | (*handle).reset(new CacheReservationManagerImpl::CacheReservationHandle( | |
96 | incremental_memory_used, | |
97 | std::enable_shared_from_this< | |
98 | CacheReservationManagerImpl<R>>::shared_from_this())); | |
99 | return s; | |
100 | } | |
101 | ||
102 | template <CacheEntryRole R> | |
103 | Status CacheReservationManagerImpl<R>::ReleaseCacheReservation( | |
104 | std::size_t incremental_memory_used) { | |
105 | assert(GetTotalMemoryUsed() >= incremental_memory_used); | |
106 | std::size_t updated_total_mem_used = | |
107 | GetTotalMemoryUsed() - incremental_memory_used; | |
108 | Status s = UpdateCacheReservation(updated_total_mem_used); | |
109 | return s; | |
110 | } | |
111 | ||
112 | template <CacheEntryRole R> | |
113 | Status CacheReservationManagerImpl<R>::IncreaseCacheReservation( | |
114 | std::size_t new_mem_used) { | |
115 | Status return_status = Status::OK(); | |
116 | while (new_mem_used > cache_allocated_size_.load(std::memory_order_relaxed)) { | |
117 | Cache::Handle* handle = nullptr; | |
118 | return_status = cache_->Insert(GetNextCacheKey(), nullptr, kSizeDummyEntry, | |
119 | GetNoopDeleterForRole<R>(), &handle); | |
120 | ||
121 | if (return_status != Status::OK()) { | |
122 | return return_status; | |
123 | } | |
124 | ||
125 | dummy_handles_.push_back(handle); | |
126 | cache_allocated_size_ += kSizeDummyEntry; | |
127 | } | |
128 | return return_status; | |
129 | } | |
130 | ||
131 | template <CacheEntryRole R> | |
132 | Status CacheReservationManagerImpl<R>::DecreaseCacheReservation( | |
133 | std::size_t new_mem_used) { | |
134 | Status return_status = Status::OK(); | |
135 | ||
136 | // Decrease to the smallest multiple of kSizeDummyEntry that is greater than | |
137 | // or equal to new_mem_used We do addition instead of new_mem_used <= | |
138 | // cache_allocated_size_.load(std::memory_order_relaxed) - kSizeDummyEntry to | |
139 | // avoid underflow of size_t when cache_allocated_size_ = 0 | |
140 | while (new_mem_used + kSizeDummyEntry <= | |
141 | cache_allocated_size_.load(std::memory_order_relaxed)) { | |
142 | assert(!dummy_handles_.empty()); | |
143 | auto* handle = dummy_handles_.back(); | |
144 | cache_->Release(handle, true); | |
145 | dummy_handles_.pop_back(); | |
146 | cache_allocated_size_ -= kSizeDummyEntry; | |
147 | } | |
148 | return return_status; | |
149 | } | |
150 | ||
151 | template <CacheEntryRole R> | |
152 | std::size_t CacheReservationManagerImpl<R>::GetTotalReservedCacheSize() { | |
153 | return cache_allocated_size_.load(std::memory_order_relaxed); | |
154 | } | |
155 | ||
156 | template <CacheEntryRole R> | |
157 | std::size_t CacheReservationManagerImpl<R>::GetTotalMemoryUsed() { | |
158 | return memory_used_; | |
159 | } | |
160 | ||
161 | template <CacheEntryRole R> | |
162 | Slice CacheReservationManagerImpl<R>::GetNextCacheKey() { | |
163 | // Calling this function will have the side-effect of changing the | |
164 | // underlying cache_key_ that is shared among other keys generated from this | |
165 | // fucntion. Therefore please make sure the previous keys are saved/copied | |
166 | // before calling this function. | |
167 | cache_key_ = CacheKey::CreateUniqueForCacheLifetime(cache_.get()); | |
168 | return cache_key_.AsSlice(); | |
169 | } | |
170 | ||
171 | template <CacheEntryRole R> | |
172 | Cache::DeleterFn CacheReservationManagerImpl<R>::TEST_GetNoopDeleterForRole() { | |
173 | return GetNoopDeleterForRole<R>(); | |
174 | } | |
175 | ||
// Explicit instantiations for every cache-entry role that makes cache
// reservations; keeping them here lets the template definitions live in
// this .cc file instead of the header.
template class CacheReservationManagerImpl<
    CacheEntryRole::kBlockBasedTableReader>;
template class CacheReservationManagerImpl<
    CacheEntryRole::kCompressionDictionaryBuildingBuffer>;
template class CacheReservationManagerImpl<CacheEntryRole::kFilterConstruction>;
template class CacheReservationManagerImpl<CacheEntryRole::kMisc>;
template class CacheReservationManagerImpl<CacheEntryRole::kWriteBuffer>;
template class CacheReservationManagerImpl<CacheEntryRole::kFileMetadata>;
template class CacheReservationManagerImpl<CacheEntryRole::kBlobCache>;
}  // namespace ROCKSDB_NAMESPACE