// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Copyright (C) 2015 CohortFS, LLC.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 */

#ifndef COHORT_LRU_H
#define COHORT_LRU_H

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iostream>
#include <mutex>
#include <utility>
#include <vector>

#include <boost/intrusive/list.hpp>

#include "common/likely.h"

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 /* XXX arch-specific define */
#endif

/* pad adjacent lanes/partitions out to distinct cache lines, to
 * avoid false sharing between their locks */
#define CACHE_PAD(_n) char __pad ## _n [CACHE_LINE_SIZE]
namespace cohort {

  namespace lru {

    namespace bi = boost::intrusive;

    /* public flag values */
    constexpr uint32_t FLAG_NONE = 0x0000;
    constexpr uint32_t FLAG_INITIAL = 0x0001;

    enum class Edge : std::uint8_t
    {
      MRU = 0,
      LRU
    };

    typedef bi::link_mode<bi::safe_link> link_mode; // for debugging
    class Object
    {
    private:
      uint32_t lru_flags;
      std::atomic<uint32_t> lru_refcnt;
      std::atomic<uint32_t> lru_adj;
      bi::list_member_hook<link_mode> lru_hook;

      typedef bi::list<Object,
		       bi::member_hook<Object,
				       bi::list_member_hook<link_mode>,
				       &Object::lru_hook>,
		       bi::constant_time_size<true>> Queue;

    public:
      Object() : lru_flags(FLAG_NONE), lru_refcnt(0), lru_adj(0) {}

      uint32_t get_refcnt() const { return lru_refcnt; }

      virtual bool reclaim() = 0;

      virtual ~Object() {}

    private:
      template <typename LK>
      friend class LRU;
    }; /* Object */
    /* allocator & recycler interface (create or re-use LRU objects) */
    class ObjectFactory
    {
    public:
      virtual Object* alloc(void) = 0;
      virtual void recycle(Object*) = 0;
      virtual ~ObjectFactory() {}
    };
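
    /* Usage sketch (illustrative only, not part of this interface):
     * a hypothetical "Inode" cache object and its factory. reclaim()
     * reports whether the object may be evicted; recycle() re-uses an
     * evicted object's storage in place of a fresh allocation. */
#if 0
    struct Inode : public Object {
      uint64_t ino;
      explicit Inode(uint64_t _ino) : ino(_ino) {}
      bool reclaim() override { return true; /* always evictable */ }
    };

    struct InodeFactory : public ObjectFactory {
      uint64_t next_ino = 1;
      Object* alloc(void) override { return new Inode(next_ino++); }
      void recycle(Object* o) override {
	/* re-initialize the evicted object for its next identity */
	static_cast<Inode*>(o)->ino = next_ino++;
      }
    };
#endif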
    template <typename LK>
    class LRU
    {
    private:

      struct Lane {
	LK lock;
	Object::Queue q;
	// Object::Queue pinned; /* placeholder for possible expansion */
	CACHE_PAD(0);
	Lane() {}
      };

      Lane* qlane;
      int n_lanes;
      std::atomic<uint32_t> evict_lane;
      const uint32_t lane_hiwat;

      static constexpr uint32_t lru_adj_modulus = 5;

      static constexpr uint32_t SENTINEL_REFCNT = 1;

      /* internal flag values */
      static constexpr uint32_t FLAG_INLRU = 0x0001;
      static constexpr uint32_t FLAG_PINNED = 0x0002; // possible future use
      static constexpr uint32_t FLAG_EVICTING = 0x0004;
      /* lanes are selected by object address, spreading lock
       * contention across n_lanes locks */
      Lane& lane_of(void* addr) {
	return qlane[(uint64_t)(addr) % n_lanes];
      } /* lane_of */

      uint32_t next_evict_lane() {
	return (evict_lane++ % n_lanes);
      } /* next_evict_lane */

      /* an object is reclaimable iff only the sentinel reference
       * remains and no other thread is already evicting it */
      bool can_reclaim(Object* o) {
	return ((o->lru_refcnt == SENTINEL_REFCNT) &&
		(!(o->lru_flags & FLAG_EVICTING)));
      } /* can_reclaim */
      Object* evict_block() {
	uint32_t lane_ix = next_evict_lane();
	for (int ix = 0; ix < n_lanes; ++ix,
	       lane_ix = next_evict_lane()) {
	  Lane& lane = qlane[lane_ix];
	  lane.lock.lock();
	  if (lane.q.empty()) {
	    /* guard: back() on an empty queue is undefined */
	    lane.lock.unlock();
	    continue;
	  }
	  /* if object at LRU has refcnt==1, it may be reclaimable */
	  Object* o = &(lane.q.back());
#if 0 /* XXX save for refactor */
	  std::cout << __func__
		    << " " << o
		    << " refcnt: " << o->lru_refcnt
		    << std::endl;
#endif
	  if (can_reclaim(o)) {
	    ++(o->lru_refcnt);
	    o->lru_flags |= FLAG_EVICTING;
	    lane.lock.unlock();
	    if (o->reclaim()) {
	      lane.lock.lock();
	      --(o->lru_refcnt);
	      /* assertions that o state has not changed across
	       * relock (the lane lock was released around reclaim()) */
	      assert(o->lru_refcnt == SENTINEL_REFCNT);
	      assert(o->lru_flags & FLAG_INLRU);
	      Object::Queue::iterator it =
		Object::Queue::s_iterator_to(*o);
	      lane.q.erase(it);
	      lane.lock.unlock();
	      return o;
	    } else {
	      // XXX can't make unreachable (means what?)
	      lane.lock.lock();
	      --(o->lru_refcnt);
	      o->lru_flags &= ~FLAG_EVICTING;
	      /* unlock in next block */
	    }
	  } /* can_reclaim(o) */
	  lane.lock.unlock();
	}
	return nullptr;
      } /* evict_block */
    public:

      LRU(int lanes, uint32_t _hiwat)
	: n_lanes(lanes), evict_lane(0), lane_hiwat(_hiwat)
      {
	assert(n_lanes > 0);
	qlane = new Lane[n_lanes];
      }

      ~LRU() { delete[] qlane; }
      bool ref(Object* o, uint32_t flags) {
	++(o->lru_refcnt);
	if (flags & FLAG_INITIAL) {
	  if ((++(o->lru_adj) % lru_adj_modulus) == 0) {
	    Lane& lane = lane_of(o);
	    lane.lock.lock();
	    /* move to MRU */
	    Object::Queue::iterator it =
	      Object::Queue::s_iterator_to(*o);
	    lane.q.erase(it);
	    lane.q.push_front(*o);
	    lane.lock.unlock();
	  } /* lru_adj hit */
	} /* initial ref */
	return true;
      } /* ref */
      void unref(Object* o, uint32_t flags) {
	uint32_t refcnt = --(o->lru_refcnt);
	if (unlikely(refcnt == 0)) {
	  Lane& lane = lane_of(o);
	  lane.lock.lock();
	  refcnt = o->lru_refcnt.load();
	  if (unlikely(refcnt == 0)) {
	    Object::Queue::iterator it =
	      Object::Queue::s_iterator_to(*o);
	    lane.q.erase(it);
	    delete o;
	  }
	  lane.lock.unlock();
	} else if (unlikely(refcnt == SENTINEL_REFCNT)) {
	  Lane& lane = lane_of(o);
	  lane.lock.lock();
	  refcnt = o->lru_refcnt.load();
	  if (likely(refcnt == SENTINEL_REFCNT)) {
	    /* move to LRU */
	    Object::Queue::iterator it =
	      Object::Queue::s_iterator_to(*o);
	    lane.q.erase(it);
	    /* hiwat check: above the watermark, drop the object
	     * rather than retaining it */
	    if (lane.q.size() > lane_hiwat) {
	      delete o;
	    } else {
	      lane.q.push_back(*o);
	    }
	  }
	  lane.lock.unlock();
	}
      } /* unref */
      Object* insert(ObjectFactory* fac, Edge edge, uint32_t flags) {
	/* use supplied functor to re-use an evicted object, or
	 * allocate a new one of the descendant type */
	Object* o = evict_block();
	if (o)
	  fac->recycle(o); /* recycle existing object */
	else
	  o = fac->alloc(); /* get a new one */

	o->lru_flags = FLAG_INLRU;

	Lane& lane = lane_of(o);
	lane.lock.lock();
	switch (edge) {
	case Edge::MRU:
	  lane.q.push_front(*o);
	  break;
	case Edge::LRU:
	  lane.q.push_back(*o);
	  break;
	default:
	  abort();
	  break;
	}
	if (flags & FLAG_INITIAL)
	  o->lru_refcnt += 2; /* sentinel ref + initial */
	else
	  ++(o->lru_refcnt); /* sentinel */
	lane.lock.unlock();
	return o;
      } /* insert */
    }; /* LRU */
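
    /* Minimal usage sketch (assumes the hypothetical Inode and
     * InodeFactory above): objects enter at an Edge via insert(),
     * take extra references via ref(), and drift toward the LRU edge
     * (and eventual reclaim) as unref() drops them back to the
     * sentinel count. */
#if 0
    inline void lru_example() {
      InodeFactory fac;
      LRU<std::mutex> lru(7 /* lanes */, 1024 /* lane hiwat */);
      Object* o = lru.insert(&fac, Edge::MRU, FLAG_INITIAL);
      lru.ref(o, FLAG_NONE);   /* take an additional reference */
      lru.unref(o, FLAG_NONE); /* drop it */
      lru.unref(o, FLAG_NONE); /* drop the initial reference */
    }
#endif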
    template <typename T, typename TTree, typename CLT, typename CEQ,
	      typename K, typename LK>
    class TreeX
    {
    public:

      static constexpr uint32_t FLAG_NONE = 0x0000;
      static constexpr uint32_t FLAG_LOCK = 0x0001;
      static constexpr uint32_t FLAG_UNLOCK = 0x0002;
      static constexpr uint32_t FLAG_UNLOCK_ON_MISS = 0x0004;

      typedef T value_type;
      typedef TTree container_type;
      typedef typename TTree::iterator iterator;
      typedef std::pair<iterator, bool> check_result;
      typedef typename TTree::insert_commit_data insert_commit_data;
      int n_part;
      int csz;

      typedef std::unique_lock<LK> unique_lock;

      struct Partition {
	LK lock;
	TTree tr;
	T** cache;
	int csz;
	CACHE_PAD(0);

	Partition() : tr(), cache(nullptr), csz(0) {}

	~Partition() {
	  if (csz)
	    ::operator delete(cache);
	}
      };

      struct Latch {
	Partition* p;
	LK* lock;
	insert_commit_data commit_data;

	Latch() : p(nullptr), lock(nullptr) {}
      };

      Partition& partition_of_scalar(uint64_t x) {
	return part[x % n_part];
      } /* partition_of_scalar */

      Partition& get(uint8_t x) {
	return part[x];
      } /* get */

      void lock() {
	std::for_each(locks.begin(), locks.end(),
		      [](LK* lk){ lk->lock(); });
      }

      void unlock() {
	std::for_each(locks.begin(), locks.end(),
		      [](LK* lk){ lk->unlock(); });
      }
      TreeX(int n_part=1, int csz=127) : n_part(n_part), csz(csz) {
	assert(n_part > 0);
	part = new Partition[n_part];
	for (int ix = 0; ix < n_part; ++ix) {
	  Partition& p = part[ix];
	  if (csz) {
	    p.csz = csz;
	    p.cache = (T**) ::operator new(csz * sizeof(T*));
	    memset(p.cache, 0, csz * sizeof(T*));
	  }
	  locks.push_back(&p.lock);
	}
      } /* TreeX */

      ~TreeX() { delete[] part; }
      T* find(uint64_t hk, const K& k, uint32_t flags) {
	T* v;
	Latch lat;
	uint32_t slot = 0;
	lat.p = &(partition_of_scalar(hk));
	if (flags & FLAG_LOCK) {
	  lat.lock = &lat.p->lock;
	  lat.lock->lock();
	}
	if (csz) { /* template specialize? */
	  slot = hk % csz;
	  v = lat.p->cache[slot];
	  if (v) {
	    if (CEQ()(*v, k)) {
	      if (flags & FLAG_LOCK)
		lat.lock->unlock();
	      return v;
	    }
	    v = nullptr; /* cache slot held a different key */
	  }
	} else {
	  v = nullptr;
	}
	iterator it = lat.p->tr.find(k, CLT());
	if (it != lat.p->tr.end()) {
	  v = &(*(it));
	  if (csz) {
	    /* fill cache slot at hk */
	    lat.p->cache[slot] = v;
	  }
	}
	if (flags & FLAG_LOCK)
	  lat.lock->unlock();
	return v;
      } /* find */
      T* find_latch(uint64_t hk, const K& k, Latch& lat,
		    uint32_t flags) {
	uint32_t slot = 0;
	T* v;
	lat.p = &(partition_of_scalar(hk));
	lat.lock = &lat.p->lock;
	if (flags & FLAG_LOCK)
	  lat.lock->lock();
	if (csz) { /* template specialize? */
	  slot = hk % csz;
	  v = lat.p->cache[slot];
	  if (v) {
	    if (CEQ()(*v, k)) {
	      if (flags & (FLAG_LOCK|FLAG_UNLOCK))
		lat.lock->unlock();
	      return v;
	    }
	    v = nullptr; /* cache slot held a different key */
	  }
	} else {
	  v = nullptr;
	}
	check_result r = lat.p->tr.insert_unique_check(
	  k, CLT(), lat.commit_data);
	if (! r.second /* !insertable (i.e., already present) */) {
	  v = &(*(r.first));
	  if (csz) {
	    /* fill cache slot at hk */
	    lat.p->cache[slot] = v;
	  }
	  if (flags & (FLAG_LOCK|FLAG_UNLOCK))
	    lat.lock->unlock();
	} else if (flags & FLAG_UNLOCK_ON_MISS) {
	  /* on a miss the latch is normally left held, so the caller
	   * can commit a new node with insert_latched() under the
	   * same lock */
	  lat.lock->unlock();
	}
	return v;
      } /* find_latch */
      void insert_latched(T* v, Latch& lat, uint32_t flags) {
	(void) lat.p->tr.insert_unique_commit(*v, lat.commit_data);
	if (flags & FLAG_UNLOCK)
	  lat.lock->unlock();
      } /* insert_latched */
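
      /* Latch protocol sketch (hypothetical helper, any instantiation
       * with K = uint64_t): probe under the partition lock with
       * find_latch(); on a miss the latch stays held, so the new node
       * can be committed with insert_latched() under the same lock,
       * which then releases it via FLAG_UNLOCK. */
#if 0
      template <typename Table, typename V>
      V* get_or_insert(Table& tbl, uint64_t hk, V* fresh) {
	typename Table::Latch lat;
	V* v = tbl.find_latch(hk, hk, lat, Table::FLAG_LOCK);
	if (!v) {
	  v = fresh;
	  tbl.insert_latched(v, lat, Table::FLAG_UNLOCK);
	}
	return v;
      }
#endif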
      void insert(uint64_t hk, T* v, uint32_t flags) {
	Partition& p = partition_of_scalar(hk);
	if (flags & FLAG_LOCK)
	  p.lock.lock();
	p.tr.insert_unique(*v);
	if (flags & FLAG_LOCK)
	  p.lock.unlock();
      } /* insert */
      void remove(uint64_t hk, T* v, uint32_t flags) {
	Partition& p = partition_of_scalar(hk);
	iterator it = TTree::s_iterator_to(*v);
	if (flags & FLAG_LOCK)
	  p.lock.lock();
	p.tr.erase(it);
	if (csz) { /* template specialize? */
	  uint32_t slot = hk % csz;
	  T* v2 = p.cache[slot];
	  /* we are intrusive, just compare addresses */
	  if (v == v2)
	    p.cache[slot] = nullptr;
	}
	if (flags & FLAG_LOCK)
	  p.lock.unlock();
      } /* remove */
      void drain(std::function<void(T*)> uref,
		 uint32_t flags = FLAG_NONE) {
	/* clear a table, call supplied function on each element
	 * found (e.g., one that returns the sentinel ref) */
	for (int t_ix = 0; t_ix < n_part; ++t_ix) {
	  Partition& p = part[t_ix];
	  if (flags & FLAG_LOCK) /* LOCKED */
	    p.lock.lock();
	  while (p.tr.size() > 0) {
	    iterator it = p.tr.begin();
	    T* v = &(*it);
	    p.tr.erase(it); /* must precede uref(v), in
			     * safe_link mode */
	    uref(v);
	  }
	  if (flags & FLAG_LOCK) /* we locked it, !LOCKED */
	    p.lock.unlock();
	} /* each partition */
      } /* drain */
    private:
      Partition* part;
      std::vector<LK*> locks;
    }; /* TreeX */
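
    /* Instantiation sketch (hypothetical "Thing" type): TreeX over a
     * boost::intrusive avl_set keyed by uint64_t, guarded by
     * std::mutex. CLT must compare (T,T), (K,T), and (T,K); CEQ
     * compares (T,K). Requires <boost/intrusive/avl_set.hpp>. */
#if 0
    struct Thing {
      uint64_t k;
      bi::avl_set_member_hook<> hook;
    };
    struct ThingLT {
      bool operator()(const Thing& lhs, const Thing& rhs) const
	{ return lhs.k < rhs.k; }
      bool operator()(const uint64_t k, const Thing& t) const
	{ return k < t.k; }
      bool operator()(const Thing& t, const uint64_t k) const
	{ return t.k < k; }
    };
    struct ThingEQ {
      bool operator()(const Thing& t, const uint64_t k) const
	{ return t.k == k; }
    };
    typedef bi::avl_set<Thing,
			bi::compare<ThingLT>,
			bi::member_hook<Thing, bi::avl_set_member_hook<>,
					&Thing::hook>> ThingTree;
    typedef TreeX<Thing, ThingTree, ThingLT, ThingEQ, uint64_t,
		  std::mutex> ThingTable;

    inline void treex_example() {
      ThingTable tbl(1 /* partitions */, 127 /* cache slots */);
      Thing t;
      t.k = 42;
      tbl.insert(t.k /* hash key */, &t, ThingTable::FLAG_LOCK);
      Thing* f = tbl.find(t.k, t.k, ThingTable::FLAG_LOCK);
      assert(f == &t);
      tbl.remove(t.k, &t, ThingTable::FLAG_LOCK);
    }
#endif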
  } /* namespace lru */
} /* namespace cohort */

#endif /* COHORT_LRU_H */