1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 * Copyright (C) 2015 CohortFS, LLC.
6 * This is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License version 2.1, as published by the Free Software
9 * Foundation. See file COPYING.
16 #include <boost/intrusive/list.hpp>
17 #include <boost/intrusive/slist.hpp>
19 #include "common/likely.h"
21 #ifndef CACHE_LINE_SIZE
22 #define CACHE_LINE_SIZE 64 /* XXX arch-specific define */
24 #define CACHE_PAD(_n) char __pad ## _n [CACHE_LINE_SIZE]
// Shorthand alias: bi:: == boost::intrusive (hooks and containers below).
30 namespace bi
= boost::intrusive
;
32 /* public flag values */
// No special behavior requested by the caller.
33 constexpr uint32_t FLAG_NONE
= 0x0000;
// Caller is taking its first reference to an object (see LRU::ref/insert).
34 constexpr uint32_t FLAG_INITIAL
= 0x0001;
// Reported back by LRU::insert (through its in/out flags argument) when an
// evicted object was recycled rather than freshly allocated.
35 constexpr uint32_t FLAG_RECYCLE
= 0x0002;
// Queue-end selector taken by LRU::insert; the enumerator list is on lines
// not visible in this extract -- NOTE(review): verify against full header.
37 enum class Edge
: std::uint8_t
// safe_link mode: intrusive hooks assert an element is unlinked before it is
// destroyed or re-linked (matches the s_iterator_to/erase usage below).
43 typedef bi::link_mode
<bi::safe_link
> link_mode
;
// --- LRU Object intrusive state (enclosing class header is on lines not
// visible in this extract) ---
// Atomic reference count; a value of SENTINEL_REFCNT (1) means only the LRU
// itself still holds the object (see can_reclaim/unref below).
49 std::atomic
<uint32_t> lru_refcnt
;
// Access counter bumped on FLAG_INITIAL refs; every lru_adj_modulus-th hit
// promotes the object toward MRU (see LRU::ref).
50 std::atomic
<uint32_t> lru_adj
;
// Hook linking this object into its lane's LRU queue.
51 bi::list_member_hook
< link_mode
> lru_hook
;
// Intrusive doubly-linked LRU queue of Objects with O(1) size(); the
// member_hook option lines are only partially visible in this extract.
53 typedef bi::list
<Object
,
55 Object
, bi::list_member_hook
< link_mode
>,
57 bi::constant_time_size
<true>> Queue
;
// Hook + singly-linked queue type used by TreeX::drain below.
59 bi::slist_member_hook
< link_mode
> q2_hook
;
61 typedef bi::slist
<Object
,
63 Object
, bi::slist_member_hook
< link_mode
>,
65 bi::constant_time_size
<true>> Queue2
;
// Start unflagged and unreferenced; the lru_flags member declaration is on a
// line not visible in this extract.
69 Object() : lru_flags(FLAG_NONE
), lru_refcnt(0), lru_adj(0) {}
// Snapshot of the current refcount (racy by nature; informational only).
71 uint32_t get_refcnt() const { return lru_refcnt
; }
// Subclass decides whether this object may actually be torn down for re-use.
73 virtual bool reclaim() = 0;
// Forward declarations for the class templates defined below; the declared
// class names are on lines not visible in this extract.
78 template <typename LK
>
81 template <typename T
, typename TTree
, typename CLT
, typename CEQ
,
82 typename K
, typename LK
>
86 /* allocator & recycler interface (create or re-use LRU objects) */
// Produce a brand-new descendant Object.
90 virtual Object
* alloc(void) = 0;
// Re-initialize an evicted Object in place for re-use.
91 virtual void recycle(Object
*) = 0;
92 virtual ~ObjectFactory() {};
// Template header for the LRU class whose members follow (LK = lock type).
95 template <typename LK
>
// --- per-LRU bookkeeping (the Lane struct and qlane/n_lanes declarations
// are on lines not visible in this extract) ---
103 // Object::Queue pinned; /* placeholder for possible expansion */
// Round-robin cursor used by evict_block's lane scan.
110 std::atomic
<uint32_t> evict_lane
;
// Per-lane high-water mark; unref trims a lane once its queue exceeds it.
111 const uint32_t lane_hiwat
;
// Promote an object on every 5th FLAG_INITIAL reference (see ref()).
113 static constexpr uint32_t lru_adj_modulus
= 5;
// The LRU's own reference; refcnt == 1 means no external holders remain.
115 static constexpr uint32_t SENTINEL_REFCNT
= 1;
117 /* internal flag values */
118 static constexpr uint32_t FLAG_INLRU
= 0x0001;
119 static constexpr uint32_t FLAG_PINNED
= 0x0002; // possible future use
120 static constexpr uint32_t FLAG_EVICTING
= 0x0004;
// Map an object address to its lane (simple modulo hash of the pointer
// value); the closing brace is on a line not visible in this extract.
122 Lane
& lane_of(void* addr
) {
123 return qlane
[(uint64_t)(addr
) % n_lanes
];
// Atomically advance the round-robin cursor and return the next lane index;
// the closing brace is on a line not visible in this extract.
126 uint32_t next_evict_lane() {
127 return (evict_lane
++ % n_lanes
);
// Reclaimable iff only the sentinel reference remains AND no other thread is
// already evicting the object (FLAG_EVICTING not set).
130 bool can_reclaim(Object
* o
) {
131 return ((o
->lru_refcnt
== SENTINEL_REFCNT
) &&
132 (!(o
->lru_flags
& FLAG_EVICTING
)));
// Scan the lanes round-robin for an object at the LRU (back) end whose only
// reference is the sentinel, and try to reclaim it.  NOTE(review): the lane
// locking, the reclaim() call, the queue erase and several closing braces
// are on lines not visible in this extract -- verify the control flow
// against the full header.
135 Object
* evict_block() {
136 uint32_t lane_ix
= next_evict_lane();
137 for (int ix
= 0; ix
< n_lanes
; ++ix
,
138 lane_ix
= next_evict_lane()) {
139 Lane
& lane
= qlane
[lane_ix
];
141 /* if object at LRU has refcnt==1, it may be reclaimable */
142 Object
* o
= &(lane
.q
.back());
143 if (can_reclaim(o
)) {
// Mark the candidate so concurrent unref/evict passes skip it.
145 o
->lru_flags
|= FLAG_EVICTING
;
// NOTE(review): the block comment starting at "150" is truncated
// (unterminated) in this extract.
150 /* assertions that o state has not changed across
152 assert(o
->lru_refcnt
== SENTINEL_REFCNT
);
153 assert(o
->lru_flags
& FLAG_INLRU
);
154 Object::Queue::iterator it
=
155 Object::Queue::s_iterator_to(*o
);
160 // XXX can't make unreachable (means what?)
// Candidate refused reclaim; clear the in-flight marker.
162 o
->lru_flags
&= ~FLAG_EVICTING
;
163 /* unlock in next block */
// Construct with a fixed lane count and per-lane high-water mark; lanes are
// default-constructed (the ctor's body braces are only partially visible in
// this extract).
173 LRU(int lanes
, uint32_t _hiwat
)
174 : n_lanes(lanes
), evict_lane(0), lane_hiwat(_hiwat
)
177 qlane
= new Lane
[n_lanes
];
// Frees only the lane array; queued Objects are owned elsewhere.
180 ~LRU() { delete[] qlane
; }
// Take a reference on o.  On FLAG_INITIAL refs, every lru_adj_modulus-th hit
// re-links o at the MRU (front) of its lane.  NOTE(review): the refcount
// increment, lane locking, queue unlink and return statement are on lines
// not visible in this extract.
182 bool ref(Object
* o
, uint32_t flags
) {
184 if (flags
& FLAG_INITIAL
) {
185 if ((++(o
->lru_adj
) % lru_adj_modulus
) == 0) {
186 Lane
& lane
= lane_of(o
);
189 Object::Queue::iterator it
=
190 Object::Queue::s_iterator_to(*o
);
// Move the object to the MRU position of its lane.
192 lane
.q
.push_front(*o
);
// Drop a reference.  Uses a decrement-then-recheck idiom: the cheap atomic
// decrement selects a candidate, then the count is re-loaded (presumably
// under the lane lock -- the lock calls are on lines not visible in this
// extract) before acting on it.
199 void unref(Object
* o
, uint32_t flags
) {
200 uint32_t refcnt
= --(o
->lru_refcnt
);
// Deferred-teardown pointer ("to-delete object"), handled out of line with
// no locks held (see the trailing comment).
201 Object
* tdo
= nullptr;
202 if (unlikely(refcnt
== 0)) {
// Count hit zero: object may leave the cache entirely.
203 Lane
& lane
= lane_of(o
);
// Re-check after the (not visible here) lock acquisition -- TODO confirm.
205 refcnt
= o
->lru_refcnt
.load();
206 if (unlikely(refcnt
== 0)) {
207 Object::Queue::iterator it
=
208 Object::Queue::s_iterator_to(*o
);
213 } else if (unlikely(refcnt
== SENTINEL_REFCNT
)) {
// Only the sentinel remains: candidate for demotion or trimming.
214 Lane
& lane
= lane_of(o
);
216 refcnt
= o
->lru_refcnt
.load();
217 if (likely(refcnt
== SENTINEL_REFCNT
)) {
219 Object::Queue::iterator it
=
220 Object::Queue::s_iterator_to(*o
);
// Over the high-water mark the object is presumably reclaimed; the branch
// body and the intervening else are on lines not visible here -- verify.
223 if (lane
.q
.size() > lane_hiwat
) {
// Otherwise the object appears to be re-queued at the LRU (back) end.
226 lane
.q
.push_back(*o
);
231 /* unref out-of-line && !LOCKED */
// Obtain an Object for the caller: recycle one reclaimed by evict_block()
// when possible (reporting FLAG_RECYCLE through the in/out flags argument),
// otherwise allocate a fresh one via the factory.  The object is marked
// in-LRU, queued on its lane, and given the sentinel reference (+1 extra
// when FLAG_INITIAL is set).  NOTE(review): the null-check/else branches,
// lane locking and return statement are on lines not visible here.
236 Object
* insert(ObjectFactory
* fac
, Edge edge
, uint32_t& flags
) {
237 /* use supplied functor to re-use an evicted object, or
238 * allocate a new one of the descendant type */
239 Object
* o
= evict_block();
241 fac
->recycle(o
); /* recycle existing object */
242 flags
|= FLAG_RECYCLE
;
245 o
= fac
->alloc(); /* get a new one */
247 o
->lru_flags
= FLAG_INLRU
;
249 Lane
& lane
= lane_of(o
);
// Front vs back placement is presumably selected by `edge`; the selecting
// branch is on lines not visible in this extract -- verify.
253 lane
.q
.push_front(*o
);
256 lane
.q
.push_back(*o
);
262 if (flags
& FLAG_INITIAL
)
263 o
->lru_refcnt
+= 2; /* sentinel ref + initial */
265 ++(o
->lru_refcnt
); /* sentinel */
// --- TreeX: hash-partitioned intrusive tree with a per-partition pointer
// cache (class header on lines not visible in this extract).  T = value,
// TTree = intrusive tree, CLT = compare functor (used with find /
// insert_unique_check below), CEQ = equality functor (not used in the
// visible fragments), K = key, LK = lock type. ---
272 template <typename T
, typename TTree
, typename CLT
, typename CEQ
,
273 typename K
, typename LK
>
278 static constexpr uint32_t FLAG_NONE
= 0x0000;
// Take the partition lock inside the call.
279 static constexpr uint32_t FLAG_LOCK
= 0x0001;
// Release the partition lock before returning.
280 static constexpr uint32_t FLAG_UNLOCK
= 0x0002;
281 static constexpr uint32_t FLAG_UNLOCK_ON_MISS
= 0x0004;
283 typedef T value_type
;
284 typedef TTree container_type
;
285 typedef typename
TTree::iterator iterator
;
// Result of insert_unique_check: (position, insertable?).
286 typedef std::pair
<iterator
, bool> check_result
;
287 typedef typename
TTree::insert_commit_data insert_commit_data
;
291 typedef std::unique_lock
<LK
> unique_lock
;
// Partition default state: empty tree, no pointer cache until the TreeX
// constructor allocates one.
300 Partition() : tr(), cache(nullptr), csz(0) {}
// Partition teardown (dtor header on a line not visible here): the cache
// array was obtained with raw ::operator new, so release it the same way.
304 ::operator delete(cache
);
// Latch carries the partition, its (possibly held) lock, and the
// insert_unique_check commit data between find_latch and insert_latched.
311 insert_commit_data commit_data
;
313 Latch() : p(nullptr), lock(nullptr) {}
// Map a scalar hash key onto its partition (modulo distribution); the
// closing brace is on a line not visible in this extract.
316 Partition
& partition_of_scalar(uint64_t x
) {
317 return part
[x
% n_part
];
// Direct partition accessor; its body is on lines not visible here.
320 Partition
& get(uint8_t x
) {
// Acquire every partition lock (the enclosing lock() method header is on a
// line not visible in this extract); always traversing the vector in order
// gives a consistent acquisition order.
329 std::for_each(locks
.begin(), locks
.end(),
330 [](LK
* lk
){ lk
->lock(); });
// Release every partition lock (enclosing unlock() header not visible).
334 std::for_each(locks
.begin(), locks
.end(),
335 [](LK
* lk
){ lk
->unlock(); });
// Build n_part partitions, each with a zeroed csz-slot pointer cache, and
// register each partition's lock for the all-locks helpers above.
// NOTE(review): some statements/braces of the loop body are on lines not
// visible in this extract.
338 TreeX(int n_part
=1, int csz
=127) : n_part(n_part
), csz(csz
) {
340 part
= new Partition
[n_part
];
341 for (int ix
= 0; ix
< n_part
; ++ix
) {
342 Partition
& p
= part
[ix
];
// The cache is a plain array of T* (no constructors to run), hence raw
// ::operator new + memset instead of new[]; freed in ~Partition.
345 p
.cache
= (T
**) ::operator new(csz
* sizeof(T
*));
346 memset(p
.cache
, 0, csz
* sizeof(T
*));
348 locks
.push_back(&p
.lock
);
// Look up key k (hk = its hash).  Fast path probes the per-partition pointer
// cache slot; on a miss, searches the intrusive tree with CLT and back-fills
// the cache slot.  FLAG_LOCK brackets the probe/search with the partition
// lock.  NOTE(review): the Latch/slot/v declarations, cache-hit comparison,
// lock calls and return statements are on lines not visible in this extract.
356 T
* find(uint64_t hk
, const K
& k
, uint32_t flags
) {
360 lat
.p
= &(partition_of_scalar(hk
));
361 if (flags
& FLAG_LOCK
) {
362 lat
.lock
= &lat
.p
->lock
;
365 if (csz
) { /* template specialize? */
367 v
= lat
.p
->cache
[slot
];
370 if (flags
& FLAG_LOCK
)
// Cache miss: search the partition's tree.
379 iterator it
= lat
.p
->tr
.find(k
, CLT());
380 if (it
!= lat
.p
->tr
.end()){
383 /* fill cache slot at hk */
384 lat
.p
->cache
[slot
] = v
;
387 if (flags
& FLAG_LOCK
)
// Like find(), but also primes lat for a later insert_latched(): on a miss,
// insert_unique_check records the insertion point in lat.commit_data.
// r.second == true means "insertable" (i.e., key not found).  The partition
// lock is retained for the caller unless the FLAG_UNLOCK combination below
// says otherwise.  NOTE(review): the trailing parameter list, declarations,
// lock calls and return statements are on lines not visible in this extract.
392 T
* find_latch(uint64_t hk
, const K
& k
, Latch
& lat
,
396 lat
.p
= &(partition_of_scalar(hk
));
397 lat
.lock
= &lat
.p
->lock
;
398 if (flags
& FLAG_LOCK
)
400 if (csz
) { /* template specialize? */
402 v
= lat
.p
->cache
[slot
];
// Cache hit path: optionally drop the lock we just took.
405 if ((flags
& FLAG_LOCK
) && (flags
& FLAG_UNLOCK
))
414 check_result r
= lat
.p
->tr
.insert_unique_check(
415 k
, CLT(), lat
.commit_data
);
416 if (! r
.second
/* !insertable (i.e., !found) */) {
419 /* fill cache slot at hk */
420 lat
.p
->cache
[slot
] = v
;
423 if ((flags
& FLAG_LOCK
) && (flags
& FLAG_UNLOCK
))
// Commit an insertion prepared by find_latch() using the saved commit_data;
// with FLAG_UNLOCK, releases the latch's partition lock (the unlock call
// itself is on a line not visible in this extract).
428 void insert_latched(T
* v
, Latch
& lat
, uint32_t flags
) {
429 (void) lat
.p
->tr
.insert_unique_commit(*v
, lat
.commit_data
);
430 if (flags
& FLAG_UNLOCK
)
432 } /* insert_latched */
// Stand-alone unique insert into the partition for hk; FLAG_LOCK brackets
// the tree mutation with the partition lock (the lock/unlock calls are on
// lines not visible in this extract).
434 void insert(uint64_t hk
, T
* v
, uint32_t flags
) {
435 Partition
& p
= partition_of_scalar(hk
);
436 if (flags
& FLAG_LOCK
)
438 p
.tr
.insert_unique(*v
);
439 if (flags
& FLAG_LOCK
)
// Erase v from its partition's tree and, if v occupies its cache slot,
// invalidate that slot.  Identity is by address (intrusive container, per
// the inline comment below).  NOTE(review): the tree erase call, the v==v2
// comparison and the lock/unlock calls are on lines not visible in this
// extract.
443 void remove(uint64_t hk
, T
* v
, uint32_t flags
) {
444 Partition
& p
= partition_of_scalar(hk
);
445 iterator it
= TTree::s_iterator_to(*v
);
446 if (flags
& FLAG_LOCK
)
449 if (csz
) { /* template specialize? */
450 uint32_t slot
= hk
% csz
;
451 T
* v2
= p
.cache
[slot
];
452 /* we are intrusive, just compare addresses */
454 p
.cache
[slot
] = nullptr;
456 if (flags
& FLAG_LOCK
)
// Empty every partition: unlink each element into a local slist first (per
// partition, optionally under its lock), then -- with no locks held -- call
// uref on each drained element.  Erasing from drain_q before uref(v) is
// required in safe_link mode because uref may destroy v.  NOTE(review): the
// per-element unlink inside the first loop and the uref(v) call are partly
// on lines not visible in this extract; the block comment at "462" is
// truncated (unterminated) here.
460 void drain(std::function
<void(T
*)> uref
,
461 uint32_t flags
= FLAG_NONE
) {
462 /* clear a table, call supplied function on
463 * each element found (e.g., returns sentinel
465 Object::Queue2 drain_q
;
466 for (int t_ix
= 0; t_ix
< n_part
; ++t_ix
) {
467 Partition
& p
= part
[t_ix
];
468 if (flags
& FLAG_LOCK
) /* LOCKED */
470 while (p
.tr
.size() > 0) {
471 iterator it
= p
.tr
.begin();
474 drain_q
.push_front(*v
);
476 if (flags
& FLAG_LOCK
) /* we locked it, !LOCKED */
478 } /* each partition */
479 /* unref out-of-line && !LOCKED */
480 while (drain_q
.size() > 0) {
481 Object::Queue2::iterator it
= drain_q
.begin();
482 T
* v
= static_cast<T
*>(&(*it
));
483 drain_q
.erase(it
); /* must precede uref(v) in safe_link mode */
// Pointers to every partition's lock, in partition order (used by the
// all-partition lock/unlock helpers above).
490 std::vector
<LK
*> locks
;
493 } /* namespace LRU */
494 } /* namespace cohort */
496 #endif /* COHORT_LRU_H */