/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};
struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};
struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
	struct list_lru_memcg	*memcg_lrus;
#endif
	long			nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
	struct list_lru_node	*node;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	struct list_head	list;
#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key);

#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)
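/*
 * Usage sketch (illustrative only, not part of this header's API): setting
 * up and tearing down a memcg-aware lru. The name my_lru is hypothetical.
 */
#if 0
static struct list_lru my_lru;

static int __init my_cache_init(void)
{
	/* one list per node, plus per-memcg lists where memcg is enabled */
	return list_lru_init_memcg(&my_lru);
}

static void my_cache_exit(void)
{
	list_lru_destroy(&my_lru);
}
#endif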
int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to keep state about
 * whether or not the element already belongs in the list, and is allowed to
 * update it lazily. Note however that this is valid for *a* list, not *this*
 * list: if the caller organizes itself in a way that elements can be in more
 * than one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before moving it
 * to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add in terms of list
 * manipulation. The comments above about an element already belonging to
 * a list are also valid for list_lru_del.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
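/*
 * Usage sketch (illustrative only): lazily parking an object on the lru.
 * struct my_obj and its fields are hypothetical; an object only needs an
 * embedded struct list_head to live on a list_lru.
 */
#if 0
struct my_obj {
	struct list_head	lru_link;
	spinlock_t		lock;
	int			refcount;
};

static void my_obj_make_reclaimable(struct my_obj *obj)
{
	/* returns false (and does nothing) if already on a list */
	list_lru_add(&my_lru, &obj->lru_link);
}

static void my_obj_reactivate(struct my_obj *obj)
{
	/* returns false (and does nothing) if not on the list */
	list_lru_del(&my_lru, &obj->lru_link);
}
#endif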
/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always return a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
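/*
 * Usage sketch (illustrative only): a walk callback in the style of the
 * dcache/inode shrinkers, reusing the hypothetical struct my_obj above.
 * Freeable objects are moved to a caller-supplied dispose list so they can
 * be freed after the lru lock is dropped.
 */
#if 0
static enum lru_status my_obj_isolate(struct list_head *item,
				      struct list_lru_one *list,
				      spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;
	struct my_obj *obj = container_of(item, struct my_obj, lru_link);

	if (!spin_trylock(&obj->lock))
		return LRU_SKIP;	/* contended, skip it this pass */

	if (obj->refcount > 0) {
		spin_unlock(&obj->lock);
		return LRU_ROTATE;	/* still in use, give another pass */
	}

	/* unlink from the lru and collect for disposal outside the lock */
	list_lru_isolate_move(list, item, dispose);
	spin_unlock(&obj->lock);
	return LRU_REMOVED;
}
#endif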
/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *	with the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque. The @isolate callback can choose to
 * drop the lock internally, but *must* return with the lock held. The callback
 * will return an enum lru_status telling the list_lru infrastructure what to
 * do with the object being scanned.
 *
 * Please note that nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);
static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}
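/*
 * Usage sketch (illustrative only): hooking the lru up to a shrinker in the
 * usual count_objects/scan_objects shape. my_lru, my_obj_isolate and
 * my_obj_free are the hypothetical names from the sketches above.
 */
#if 0
static unsigned long my_shrink_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	return list_lru_shrink_count(&my_lru, sc);
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct my_obj *obj, *next;

	freed = list_lru_shrink_walk(&my_lru, sc, my_obj_isolate, &dispose);

	/* free the isolated objects with no lru lock held */
	list_for_each_entry_safe(obj, next, &dispose, lru_link) {
		list_del_init(&obj->lru_link);
		my_obj_free(obj);
	}
	return freed;
}
#endif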
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
#endif /* _LRU_LIST_H */