1 //! Least recently used (LRU) cache
3 //! Implements a cache with least recently used cache replacement policy.
4 //! A HashMap is used for fast access by a given key and a doubly linked list
5 //! is used to keep track of the cache access order.
7 use std
::collections
::{HashMap, hash_map::Entry}
;
8 use std
::marker
::PhantomData
;
10 /// Interface for getting values on cache misses.
11 pub trait Cacher
<K
, V
> {
12 /// Fetch a value for key on cache miss.
14 /// Whenever a cache miss occurs, the fetch method provides a corresponding value.
15 /// If no value can be obtained for the given key, None is returned, the cache is
16 /// not updated in that case.
17 fn fetch(&mut self, key
: K
) -> Result
<Option
<V
>, anyhow
::Error
>;
/// Node of the doubly linked list storing key and value
struct CacheNode<K, V> {
    // We need to additionally store the key to be able to remove it
    // from the HashMap when removing the tail.
    key: K,
    value: V,
    prev: *mut CacheNode<K, V>,
    next: *mut CacheNode<K, V>,
    // Dropcheck marker. See the phantom-data section in the rustonomicon.
    _marker: PhantomData<Box<CacheNode<K, V>>>,
}

impl<K, V> CacheNode<K, V> {
    /// Create a new, unlinked node holding `key` and `value`.
    /// `prev` and `next` stay null until the node is linked into a list.
    fn new(key: K, value: V) -> Self {
        Self {
            key,
            value,
            prev: std::ptr::null_mut(),
            next: std::ptr::null_mut(),
            _marker: PhantomData,
        }
    }
}
44 /// LRU cache instance.
48 /// # use self::proxmox_backup::tools::lru_cache::{Cacher, LruCache};
49 /// # fn main() -> Result<(), anyhow::Error> {
50 /// struct LruCacher {};
52 /// impl Cacher<u64, u64> for LruCacher {
53 /// fn fetch(&mut self, key: u64) -> Result<Option<u64>, anyhow::Error> {
58 /// let mut cache = LruCache::new(3);
60 /// assert_eq!(cache.get_mut(1), None);
61 /// assert_eq!(cache.len(), 0);
63 /// cache.insert(1, 1);
64 /// cache.insert(2, 2);
65 /// cache.insert(3, 3);
66 /// cache.insert(4, 4);
67 /// assert_eq!(cache.len(), 3);
69 /// assert_eq!(cache.get_mut(1), None);
70 /// assert_eq!(cache.get_mut(2), Some(&mut 2));
71 /// assert_eq!(cache.get_mut(3), Some(&mut 3));
72 /// assert_eq!(cache.get_mut(4), Some(&mut 4));
77 /// assert_eq!(cache.len(), 0);
78 /// assert_eq!(cache.get_mut(2), None);
79 /// // access will fill in missing cache entry by fetching from LruCacher
80 /// assert_eq!(cache.access(2, &mut LruCacher {}).unwrap(), Some(&mut 2));
82 /// cache.insert(1, 1);
83 /// assert_eq!(cache.get_mut(1), Some(&mut 1));
86 /// assert_eq!(cache.len(), 0);
87 /// assert_eq!(cache.get_mut(1), None);
91 pub struct LruCache
<K
, V
> {
92 /// Quick access to individual nodes via the node pointer.
93 map
: HashMap
<K
, *mut CacheNode
<K
, V
>>,
94 /// Actual nodes stored in a linked list.
95 list
: LinkedList
<K
, V
>,
96 /// Max nodes the cache can hold, temporarily exceeded by 1 due to
97 /// implementation details.
99 // Dropcheck marker. See the phantom-data section in the rustonomicon.
100 _marker
: PhantomData
<Box
<CacheNode
<K
, V
>>>,
// trivial: if our contents are Send, the whole cache is Send
// SAFETY(review): relies on the cache exclusively owning all of its raw node
// pointers, so moving the cache moves every node with it — confirm no pointer
// escapes the struct.
unsafe impl<K: Send, V: Send> Send for LruCache<K, V> {}
106 impl<K
: std
::cmp
::Eq
+ std
::hash
::Hash
+ Copy
, V
> LruCache
<K
, V
> {
107 /// Create LRU cache instance which holds up to `capacity` nodes at once.
108 pub fn new(capacity
: usize) -> Self {
110 map
: HashMap
::with_capacity(capacity
),
111 list
: LinkedList
::new(),
113 _marker
: PhantomData
,
117 /// Clear all the entries from the cache.
118 pub fn clear(&mut self) {
119 // This frees only the HashMap with the node pointers.
121 // This frees the actual nodes and resets the list head and tail.
125 /// Insert or update an entry identified by `key` with the given `value`.
126 /// This entry is placed as the most recently used node at the head.
127 pub fn insert(&mut self, key
: K
, value
: V
) {
128 match self.map
.entry(key
) {
129 Entry
::Occupied(mut o
) => {
130 // Node present, update value
131 let node_ptr
= *o
.get_mut();
132 self.list
.bring_to_front(node_ptr
);
133 let mut node
= unsafe { Box::from_raw(node_ptr) }
;
135 let _node_ptr
= Box
::into_raw(node
);
137 Entry
::Vacant(v
) => {
138 // Node not present, insert a new one
139 // Unfortunately we need a copy of the key here, therefore it has
140 // to impl the copy trait
141 let node
= Box
::new(CacheNode
::new(key
, value
));
142 let node_ptr
= Box
::into_raw(node
);
143 self.list
.push_front(node_ptr
);
145 // If we have more elements than capacity,
146 // delete the lists tail node (= oldest node).
147 // This needs to be executed after the insert in order to
148 // avoid borrow conflict. This means there are temporarily
149 // self.capacity + 1 cache nodes.
150 if self.map
.len() > self.capacity
{
157 /// Remove the given `key` and its `value` from the cache.
158 pub fn remove(&mut self, key
: K
) -> Option
<V
> {
159 // Remove node pointer from the HashMap and get ownership of the node
160 let node_ptr
= self.map
.remove(&key
)?
;
161 let node
= self.list
.remove(node_ptr
);
165 /// Remove the least recently used node from the cache.
166 fn pop_tail(&mut self) {
167 if let Some(old_tail
) = self.list
.pop_tail() {
168 // Remove HashMap entry for old tail
169 self.map
.remove(&old_tail
.key
);
173 /// Get a mutable reference to the value identified by `key`.
174 /// This will update the cache entry to be the most recently used entry.
175 /// On cache misses, None is returned.
176 pub fn get_mut(&mut self, key
: K
) -> Option
<&mut V
> {
177 let node_ptr
= self.map
.get(&key
)?
;
178 self.list
.bring_to_front(*node_ptr
);
179 Some(unsafe { &mut (*self.list.head).value }
)
182 /// Number of entries in the cache.
183 pub fn len(&self) -> usize {
187 /// Returns `true` when the cache is empty
188 pub fn is_empty(&self) -> bool
{
192 /// Get a mutable reference to the value identified by `key`.
193 /// This will update the cache entry to be the most recently used entry.
194 /// On cache misses, the cachers fetch method is called to get a corresponding
196 /// If fetch returns a value, it is inserted as the most recently used entry
198 pub fn access
<'a
>(&'a
mut self, key
: K
, cacher
: &mut dyn Cacher
<K
, V
>) -> Result
<Option
<&'a
mut V
>, anyhow
::Error
> {
199 match self.map
.entry(key
) {
200 Entry
::Occupied(mut o
) => {
201 // Cache hit, birng node to front of list
202 let node_ptr
= *o
.get_mut();
203 self.list
.bring_to_front(node_ptr
);
205 Entry
::Vacant(v
) => {
206 // Cache miss, try to fetch from cacher and insert at the front
207 match cacher
.fetch(key
)?
{
208 None
=> return Ok(None
),
210 // Unfortunately we need a copy of the key here, therefore it has
211 // to impl the copy trait
212 let node
= Box
::new(CacheNode
::new(key
, value
));
213 let node_ptr
= Box
::into_raw(node
);
214 self.list
.push_front(node_ptr
);
216 // If we have more elements than capacity,
217 // delete the lists tail node (= oldest node).
218 // This needs to be executed after the insert in order to
219 // avoid borrow conflict. This means there are temporarily
220 // self.capacity + 1 cache nodes.
221 if self.map
.len() > self.capacity
{
229 Ok(Some(unsafe { &mut (*self.list.head).value }
))
233 /// Linked list holding the nodes of the LruCache.
235 /// This struct actually holds the CacheNodes via the raw linked list pointers
236 /// and allows to define the access sequence of these via the list sequence.
237 /// The LinkedList of the standard library unfortunately does not implement
238 /// an efficient way to bring list entries to the front, therefore we need our own.
239 struct LinkedList
<K
, V
> {
240 head
: *mut CacheNode
<K
, V
>,
241 tail
: *mut CacheNode
<K
, V
>,
244 impl<K
, V
> LinkedList
<K
, V
> {
245 /// Create a new empty linked list.
248 head
: std
::ptr
::null_mut(),
249 tail
: std
::ptr
::null_mut(),
253 /// Bring the CacheNode referenced by `node_ptr` to the front of the linked list.
254 fn bring_to_front(&mut self, node_ptr
: *mut CacheNode
<K
, V
>) {
255 if node_ptr
== self.head
{
256 // node is already head, just return
260 let mut node
= unsafe { Box::from_raw(node_ptr) }
;
261 // Update the prev node to point to next (or null if current node is tail)
262 unsafe { (*node.prev).next = node.next }
;
264 // Update the next node or otherwise the tail
265 if !node
.next
.is_null() {
266 unsafe { (*node.next).prev = node.prev }
;
268 // No next node means this was the tail
269 self.tail
= node
.prev
;
272 node
.prev
= std
::ptr
::null_mut();
273 node
.next
= self.head
;
274 // update the head and release ownership of the node again
275 let node_ptr
= Box
::into_raw(node
);
276 // Update current head
277 unsafe { (*self.head).prev = node_ptr }
;
278 // Update to new head
279 self.head
= node_ptr
;
282 /// Insert a new node at the front of the linked list.
283 fn push_front(&mut self, node_ptr
: *mut CacheNode
<K
, V
>) {
284 let mut node
= unsafe { Box::from_raw(node_ptr) }
;
286 // Old head gets new heads next
287 node
.next
= self.head
;
288 // Release ownership of node, rest can be handled with just the pointer.
289 let node_ptr
= Box
::into_raw(node
);
291 // Update the prev for the old head
292 if !self.head
.is_null() {
293 unsafe { (*self.head).prev = node_ptr }
;
296 // Update the head to the new node pointer
297 self.head
= node_ptr
;
299 // If there was no old tail, this node will be the new tail too
300 if self.tail
.is_null() {
301 self.tail
= node_ptr
;
305 /// Remove the node referenced by `node_ptr` from the linked list and return it.
306 fn remove(&mut self, node_ptr
: *mut CacheNode
<K
, V
>) -> Box
<CacheNode
<K
, V
>> {
307 let node
= unsafe { Box::from_raw(node_ptr) }
;
309 // Update the previous node or otherwise the head
310 if !node
.prev
.is_null() {
311 unsafe { (*node.prev).next = node.next }
;
313 // No previous node means this was the head
314 self.head
= node
.next
;
317 // Update the next node or otherwise the tail
318 if !node
.next
.is_null() {
319 unsafe { (*node.next).prev = node.prev }
;
321 // No next node means this was the tail
322 self.tail
= node
.prev
;
327 /// Remove the tail node from the linked list and return it.
328 fn pop_tail(&mut self) -> Option
<Box
<CacheNode
<K
, V
>>> {
329 if self.tail
.is_null() {
333 let old_tail
= unsafe { Box::from_raw(self.tail) }
;
334 self.tail
= old_tail
.prev
;
335 // Update next node for new tail
336 if !self.tail
.is_null() {
337 unsafe { (*self.tail).next = std::ptr::null_mut() }
;
342 /// Clear the linked list and free all the nodes.
343 fn clear(&mut self) {
344 let mut next
= self.head
;
345 while !next
.is_null() {
346 // Taking ownership of node and drop it at the end of the block.
347 let current
= unsafe { Box::from_raw(next) }
;
350 // Reset head and tail pointers
351 self.head
= std
::ptr
::null_mut();
352 self.tail
= std
::ptr
::null_mut();
/// Exercise push_front, bring_to_front, pop_tail and clear on the raw list.
#[test]
fn test_linked_list() {
    let mut list = LinkedList::new();
    for idx in 0..3 {
        let node = Box::new(CacheNode::new(idx, idx + 1));
        // Get pointer, release ownership.
        let node_ptr = Box::into_raw(node);
        list.push_front(node_ptr);
    }
    // Pushed 0, 1, 2 to the front, so the list is now 2 -> 1 -> 0.
    assert_eq!(unsafe { (*list.head).key }, 2);
    assert_eq!(unsafe { (*list.head).value }, 3);
    assert_eq!(unsafe { (*list.tail).key }, 0);
    assert_eq!(unsafe { (*list.tail).value }, 1);

    // Move the tail (0) to the front: 0 -> 2 -> 1.
    list.bring_to_front(list.tail);
    assert_eq!(unsafe { (*list.head).key }, 0);
    assert_eq!(unsafe { (*list.head).value }, 1);
    assert_eq!(unsafe { (*list.tail).key }, 1);
    assert_eq!(unsafe { (*list.tail).value }, 2);

    // Move the tail (1) to the front: 1 -> 0 -> 2.
    list.bring_to_front(list.tail);
    assert_eq!(unsafe { (*list.head).key }, 1);
    assert_eq!(unsafe { (*list.head).value }, 2);
    assert_eq!(unsafe { (*list.tail).key }, 2);
    assert_eq!(unsafe { (*list.tail).value }, 3);

    // Pop the tail (2), leaving 1 -> 0.
    let tail = list.pop_tail().unwrap();
    assert_eq!(tail.key, 2);
    assert_eq!(tail.value, 3);
    assert_eq!(unsafe { (*list.head).key }, 1);
    assert_eq!(unsafe { (*list.head).value }, 2);
    assert_eq!(unsafe { (*list.tail).key }, 0);
    assert_eq!(unsafe { (*list.tail).value }, 1);

    list.clear();
    assert!(list.head.is_null());
    assert!(list.tail.is_null());
}