// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Copyright (C) 2015 CohortFS, LLC.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 */

#ifndef COHORT_LRU_H
#define COHORT_LRU_H
#include <atomic>
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <mutex>
#include <utility>
#include <vector>

#include <boost/intrusive/list.hpp>
#include <boost/intrusive/slist.hpp>

#include "common/likely.h"
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 /* XXX arch-specific define */
#endif

#define CACHE_PAD(_n) char __pad ## _n [CACHE_LINE_SIZE]
namespace cohort {
namespace lru {

namespace bi = boost::intrusive;
/* public flag values */
constexpr uint32_t FLAG_NONE = 0x0000;
constexpr uint32_t FLAG_INITIAL = 0x0001;
enum class Edge : std::uint8_t {
  MRU = 0,
  LRU
};
typedef bi::link_mode<bi::safe_link> link_mode;
class Object
{
private:
  uint32_t lru_flags;
  std::atomic<uint32_t> lru_refcnt;
  std::atomic<uint32_t> lru_adj;
  bi::list_member_hook<link_mode> lru_hook;

  typedef bi::list<Object,
                   bi::member_hook<Object,
                                   bi::list_member_hook<link_mode>,
                                   &Object::lru_hook>,
                   bi::constant_time_size<true>> Queue;

  bi::slist_member_hook<link_mode> q2_hook;

  typedef bi::slist<Object,
                    bi::member_hook<Object,
                                    bi::slist_member_hook<link_mode>,
                                    &Object::q2_hook>,
                    bi::constant_time_size<true>> Queue2;

public:
  Object() : lru_flags(FLAG_NONE), lru_refcnt(0), lru_adj(0) {}

  uint32_t get_refcnt() const { return lru_refcnt; }

  virtual bool reclaim() = 0;

  virtual ~Object() {}

private:
  template <typename LK>
  friend class LRU;

  template <typename T, typename TTree, typename CLT, typename CEQ,
            typename K, typename LK>
  friend class TreeX;
}; /* Object */
/* allocator & recycler interface (create or re-use LRU objects) */
class ObjectFactory
{
public:
  virtual Object* alloc(void) = 0;
  virtual void recycle(Object*) = 0;
  virtual ~ObjectFactory() {}
};
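/* A minimal sketch of a concrete entry type and its factory, for
 * illustration only (the names ExampleEntry and ExampleFactory are
 * hypothetical, not part of this interface):
 *
 *   struct ExampleEntry : public Object {
 *     std::string name;
 *     bool reclaim() override { return true; } // always evictable
 *   };
 *
 *   struct ExampleFactory : public ObjectFactory {
 *     Object* alloc(void) override { return new ExampleEntry(); }
 *     void recycle(Object* o) override {
 *       static_cast<ExampleEntry*>(o)->name.clear(); // reset for re-use
 *     }
 *   };
 */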
template <typename LK>
class LRU
{
private:
  struct Lane {
    LK lock;
    Object::Queue q;
    // Object::Queue pinned; /* placeholder for possible expansion */
    CACHE_PAD(0);
  };

  Lane* qlane;
  int n_lanes;
  std::atomic<uint32_t> evict_lane;
  const uint32_t lane_hiwat;

  static constexpr uint32_t lru_adj_modulus = 5;

  static constexpr uint32_t SENTINEL_REFCNT = 1;

  /* internal flag values */
  static constexpr uint32_t FLAG_INLRU = 0x0001;
  static constexpr uint32_t FLAG_PINNED = 0x0002; // possible future use
  static constexpr uint32_t FLAG_EVICTING = 0x0004;
  Lane& lane_of(void* addr) {
    return qlane[(uint64_t)(addr) % n_lanes];
  }

  uint32_t next_evict_lane() {
    return (evict_lane++ % n_lanes);
  }
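  /* an object resident in the LRU always holds one sentinel reference,
   * so refcnt == SENTINEL_REFCNT means no caller currently holds it */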
  bool can_reclaim(Object* o) {
    return ((o->lru_refcnt == SENTINEL_REFCNT) &&
            (!(o->lru_flags & FLAG_EVICTING)));
  }
  Object* evict_block() {
    uint32_t lane_ix = next_evict_lane();
    for (int ix = 0; ix < n_lanes; ++ix,
           lane_ix = next_evict_lane()) {
      Lane& lane = qlane[lane_ix];
      lane.lock.lock();
      if (lane.q.empty()) { /* guard back() on an empty queue */
        lane.lock.unlock();
        continue;
      }
      /* if object at LRU has refcnt==1, it may be reclaimable */
      Object* o = &(lane.q.back());
      if (can_reclaim(o)) {
        ++(o->lru_refcnt); /* guard ref across reclaim() */
        o->lru_flags |= FLAG_EVICTING;
        lane.lock.unlock();
        if (o->reclaim()) {
          lane.lock.lock();
          --(o->lru_refcnt);
          /* assertions that o state has not changed across relock */
          assert(o->lru_refcnt == SENTINEL_REFCNT);
          assert(o->lru_flags & FLAG_INLRU);
          Object::Queue::iterator it = Object::Queue::s_iterator_to(*o);
          lane.q.erase(it);
          lane.lock.unlock();
          return o;
        } else {
          /* XXX reclaim() failed, i.e., the object could not be made
           * unreachable; clear the eviction mark and try the next lane */
          lane.lock.lock();
          --(o->lru_refcnt);
          o->lru_flags &= ~FLAG_EVICTING;
          /* unlock in next block */
        }
      } /* can_reclaim(o) */
      lane.lock.unlock();
    }
    return nullptr;
  } /* evict_block */
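  /* eviction is two-phase: mark FLAG_EVICTING and take a guard ref under
   * the lane lock, call reclaim() with the lock dropped (it may need to
   * take other locks), then re-verify object state under the lock before
   * unlinking it */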
public:
  LRU(int lanes, uint32_t _hiwat)
    : n_lanes(lanes), evict_lane(0), lane_hiwat(_hiwat)
  {
    assert(n_lanes > 0);
    qlane = new Lane[n_lanes];
  }

  ~LRU() { delete[] qlane; }
  bool ref(Object* o, uint32_t flags) {
    ++(o->lru_refcnt);
    if (flags & FLAG_INITIAL) {
      if ((++(o->lru_adj) % lru_adj_modulus) == 0) {
        Lane& lane = lane_of(o);
        lane.lock.lock();
        /* move object to MRU */
        Object::Queue::iterator it = Object::Queue::s_iterator_to(*o);
        lane.q.erase(it);
        lane.q.push_front(*o);
        lane.lock.unlock();
      } /* lru_adj */
    } /* initial ref */
    return true;
  } /* ref */
  void unref(Object* o, uint32_t flags) {
    uint32_t refcnt = --(o->lru_refcnt);
    Object* tdo = nullptr;
    if (unlikely(refcnt == 0)) {
      Lane& lane = lane_of(o);
      lane.lock.lock();
      refcnt = o->lru_refcnt.load();
      if (unlikely(refcnt == 0)) {
        Object::Queue::iterator it = Object::Queue::s_iterator_to(*o);
        lane.q.erase(it);
        tdo = o;
      }
      lane.lock.unlock();
    } else if (unlikely(refcnt == SENTINEL_REFCNT)) {
      Lane& lane = lane_of(o);
      lane.lock.lock();
      refcnt = o->lru_refcnt.load();
      if (likely(refcnt == SENTINEL_REFCNT)) {
        /* move object to LRU */
        Object::Queue::iterator it = Object::Queue::s_iterator_to(*o);
        lane.q.erase(it);
        /* hiwat check: discard rather than re-queue when over limit */
        if (lane.q.size() > lane_hiwat) {
          tdo = o;
        } else {
          lane.q.push_back(*o);
        }
      }
      lane.lock.unlock();
    }
    /* unref out-of-line && !LOCKED */
    if (tdo)
      delete tdo;
  } /* unref */
  Object* insert(ObjectFactory* fac, Edge edge, uint32_t flags) {
    /* use the supplied factory to re-use an evicted object, or
     * allocate a new one of the descendant type */
    Object* o = evict_block();
    if (o)
      fac->recycle(o); /* recycle existing object */
    else
      o = fac->alloc(); /* get a new one */

    o->lru_flags = FLAG_INLRU;

    Lane& lane = lane_of(o);
    lane.lock.lock();
    switch (edge) {
    case Edge::MRU:
      lane.q.push_front(*o);
      break;
    case Edge::LRU:
      lane.q.push_back(*o);
      break;
    default:
      abort(); /* invalid edge */
    }
    if (flags & FLAG_INITIAL)
      o->lru_refcnt += 2; /* sentinel ref + initial */
    else
      ++(o->lru_refcnt); /* sentinel */
    lane.lock.unlock();
    return o;
  } /* insert */
}; /* LRU */
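/* A minimal usage sketch, assuming the hypothetical ExampleEntry and
 * ExampleFactory above, with std::mutex as the lane lock type:
 *
 *   LRU<std::mutex> lru(7, 100);  // 7 lanes, per-lane hiwat of 100
 *   ExampleFactory fac;
 *   Object* o = lru.insert(&fac, Edge::MRU, FLAG_INITIAL);
 *   // o now holds the sentinel ref plus the caller's initial ref
 *   lru.ref(o, FLAG_NONE);        // extra ref across an access
 *   lru.unref(o, FLAG_NONE);      // drop the access ref
 *   lru.unref(o, FLAG_NONE);      // drop the initial ref; o is now at
 *                                 // refcnt 1 and eligible for eviction
 */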
template <typename T, typename TTree, typename CLT, typename CEQ,
          typename K, typename LK>
class TreeX
{
public:
  static constexpr uint32_t FLAG_NONE = 0x0000;
  static constexpr uint32_t FLAG_LOCK = 0x0001;
  static constexpr uint32_t FLAG_UNLOCK = 0x0002;
  static constexpr uint32_t FLAG_UNLOCK_ON_MISS = 0x0004;

  typedef T value_type;
  typedef TTree container_type;
  typedef typename TTree::iterator iterator;
  typedef std::pair<iterator, bool> check_result;
  typedef typename TTree::insert_commit_data insert_commit_data;
  int n_part;
  int csz;

  typedef std::unique_lock<LK> unique_lock;
  struct Partition {
    LK lock;
    TTree tr;
    T** cache;
    int csz;
    CACHE_PAD(0);

    Partition() : tr(), cache(nullptr), csz(0) {}

    ~Partition() {
      if (cache) {
        ::operator delete(cache);
      }
    }
  };

  struct Latch {
    Partition* p;
    LK* lock;
    insert_commit_data commit_data;

    Latch() : p(nullptr), lock(nullptr) {}
  };
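  /* a Latch carries the partition, its lock, and boost.intrusive
   * insert_commit_data from find_latch() (the check) to insert_latched()
   * (the commit), so a miss can be converted to an insert without
   * re-searching the tree */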
  Partition& partition_of_scalar(uint64_t x) {
    return part[x % n_part];
  }

  Partition& get(uint8_t x) {
    return part[x];
  }
  void lock() {
    std::for_each(locks.begin(), locks.end(),
                  [](LK* lk){ lk->lock(); });
  }

  void unlock() {
    std::for_each(locks.begin(), locks.end(),
                  [](LK* lk){ lk->unlock(); });
  }
  TreeX(int n_part=1, int csz=127) : n_part(n_part), csz(csz) {
    assert(n_part > 0);
    part = new Partition[n_part];
    for (int ix = 0; ix < n_part; ++ix) {
      Partition& p = part[ix];
      if (csz) {
        p.csz = csz;
        p.cache = (T**) ::operator new(csz * sizeof(T*));
        memset(p.cache, 0, csz * sizeof(T*));
      }
      locks.push_back(&p.lock);
    }
  }

  ~TreeX() {
    delete[] part;
  }
  T* find(uint64_t hk, const K& k, uint32_t flags) {
    T* v = nullptr;
    Latch lat;
    uint32_t slot = 0;
    lat.p = &(partition_of_scalar(hk));
    if (flags & FLAG_LOCK) {
      lat.lock = &lat.p->lock;
      lat.lock->lock();
    }
    if (csz) { /* template specialize? */
      slot = hk % csz;
      v = lat.p->cache[slot];
      if (v) {
        if (CEQ()(*v, k)) {
          if (flags & FLAG_LOCK)
            lat.lock->unlock();
          return v; /* cache hit */
        }
        v = nullptr; /* slot held a different key */
      }
    }
    iterator it = lat.p->tr.find(k, CLT());
    if (it != lat.p->tr.end()) {
      v = &(*(it));
      if (csz) {
        /* fill cache slot at hk */
        lat.p->cache[slot] = v;
      }
    }
    if (flags & FLAG_LOCK)
      lat.lock->unlock();
    return v;
  } /* find */
  T* find_latch(uint64_t hk, const K& k, Latch& lat,
                uint32_t flags) {
    T* v = nullptr;
    uint32_t slot = 0;
    lat.p = &(partition_of_scalar(hk));
    lat.lock = &lat.p->lock;
    if (flags & FLAG_LOCK)
      lat.lock->lock();
    if (csz) { /* template specialize? */
      slot = hk % csz;
      v = lat.p->cache[slot];
      if (v) {
        if (CEQ()(*v, k)) {
          if ((flags & FLAG_LOCK) && (flags & FLAG_UNLOCK))
            lat.lock->unlock();
          return v; /* cache hit */
        }
        v = nullptr;
      }
    }
    check_result r = lat.p->tr.insert_unique_check(
      k, CLT(), lat.commit_data);
    if (! r.second /* !insertable, i.e., the key was found */) {
      v = &(*(r.first));
      if (csz) {
        /* fill cache slot at hk */
        lat.p->cache[slot] = v;
      }
    }
    if ((flags & FLAG_LOCK) && (flags & FLAG_UNLOCK))
      lat.lock->unlock();
    return v;
  } /* find_latch */
  void insert_latched(T* v, Latch& lat, uint32_t flags) {
    (void) lat.p->tr.insert_unique_commit(*v, lat.commit_data);
    if (flags & FLAG_UNLOCK)
      lat.lock->unlock();
  } /* insert_latched */
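  /* A sketch of the intended miss path (illustrative only; make_entry is
   * a hypothetical helper constructing a T for key k):
   *
   *   Latch lat;
   *   T* v = tx.find_latch(hk, k, lat, FLAG_LOCK);
   *   if (!v) {
   *     v = make_entry(k);
   *     tx.insert_latched(v, lat, FLAG_UNLOCK); // commit and release
   *   } else {
   *     lat.lock->unlock(); // found; just release the partition lock
   *   }
   */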
  void insert(uint64_t hk, T* v, uint32_t flags) {
    Partition& p = partition_of_scalar(hk);
    if (flags & FLAG_LOCK)
      p.lock.lock();
    p.tr.insert_unique(*v);
    if (flags & FLAG_LOCK)
      p.lock.unlock();
  } /* insert */
  void remove(uint64_t hk, T* v, uint32_t flags) {
    Partition& p = partition_of_scalar(hk);
    iterator it = TTree::s_iterator_to(*v);
    if (flags & FLAG_LOCK)
      p.lock.lock();
    p.tr.erase(it);
    if (csz) { /* template specialize? */
      uint32_t slot = hk % csz;
      T* v2 = p.cache[slot];
      /* we are intrusive, just compare addresses */
      if (v == v2)
        p.cache[slot] = nullptr;
    }
    if (flags & FLAG_LOCK)
      p.lock.unlock();
  } /* remove */
  void drain(std::function<void(T*)> uref,
             uint32_t flags = FLAG_NONE) {
    /* clear a table, calling the supplied function on each element
     * found (e.g., one that returns the sentinel reference) */
    Object::Queue2 drain_q;
    for (int t_ix = 0; t_ix < n_part; ++t_ix) {
      Partition& p = part[t_ix];
      if (flags & FLAG_LOCK) /* LOCKED */
        p.lock.lock();
      while (p.tr.size() > 0) {
        iterator it = p.tr.begin();
        T* v = &(*it);
        p.tr.erase(it);
        drain_q.push_front(*v);
      }
      if (flags & FLAG_LOCK) /* we locked it, !LOCKED */
        p.lock.unlock();
    } /* each partition */
    /* unref out-of-line && !LOCKED */
    while (drain_q.size() > 0) {
      Object::Queue2::iterator it = drain_q.begin();
      T* v = static_cast<T*>(&(*it));
      drain_q.erase(it); /* must precede uref(v) in safe_link mode */
      uref(v);
    }
  } /* drain */
private:
  Partition* part;
  std::vector<LK*> locks;
}; /* TreeX */
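/* Template parameters, as used above: T is the cached type (derived from
 * Object), TTree an intrusive set of T, CLT/CEQ the key comparison and
 * key equality functors, K the key type, and LK the lock type, e.g.:
 *
 *   TreeX<ExampleEntry, EntryTree, EntryLT, EntryEQ,
 *         std::string, std::mutex> tx(7, 127); // hypothetical types
 */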
} /* namespace lru */
} /* namespace cohort */

#endif /* COHORT_LRU_H */