/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;
/* list_lru_walk_cb must always return one of these */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#ifdef CONFIG_MEMCG_KMEM
	/* for cgroup aware lrus, points to per cgroup lists; otherwise NULL */
	struct list_lru_memcg	__rcu *memcg_lrus;
#endif
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
	int			shrinker_id;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)
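
/*
 * Example (illustrative sketch, not part of this header): a plain,
 * non-memcg-aware lru needs only list_lru_init() paired with
 * list_lru_destroy(). "my_cache_lru" and the setup/teardown helpers are
 * hypothetical names reused by the sketches further down.
 *
 *	static struct list_lru my_cache_lru;
 *
 *	static int __init my_cache_setup(void)
 *	{
 *		return list_lru_init(&my_cache_lru);
 *	}
 *
 *	static void my_cache_teardown(void)
 *	{
 *		list_lru_destroy(&my_cache_lru);
 *	}
 */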

int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep state about
 * whether or not the element already belongs in the list and is allowed to
 * lazily update it. Note however that this is valid for *a* list, not *this*
 * list. If the caller organizes itself in a way that elements can be in more
 * than one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before moving it
 * to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
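
/*
 * Example (hypothetical sketch): a cache object embedding a list_head can
 * be put on the lru when it becomes unused; because list_lru_add() is a
 * no-op for items already on a list, the caller may call it lazily.
 * "struct my_object" and "nr_unused" are illustrative names.
 *
 *	struct my_object {
 *		spinlock_t lock;
 *		struct list_head lru;
 *	};
 *
 *	if (list_lru_add(&my_cache_lru, &obj->lru))
 *		atomic_long_inc(&nr_unused);
 */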

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to a list
 * are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
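
/*
 * Example (hypothetical sketch): the object is taken back off the lru when
 * it gains a new user; the return value says whether it was actually there.
 *
 *	if (list_lru_del(&my_cache_lru, &obj->lru))
 *		atomic_long_dec(&nr_unused);
 */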

/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}
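
/*
 * Example (hypothetical sketch): a shrinker's ->count_objects() side can
 * simply forward to list_lru_shrink_count(), which selects the right
 * node/memcg list from the shrink_control.
 *
 *	static unsigned long my_cache_count(struct shrinker *shrink,
 *					    struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_cache_lru, sc);
 *	}
 */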

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
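
/*
 * Example (hypothetical sketch): an isolate callback inspects one item
 * under the lru lock and tells the walker what happened. Here, items whose
 * per-object lock cannot be taken are skipped, and everything else is moved
 * onto a private "freeable" list with list_lru_isolate_move() for disposal
 * after the walk, outside the lru lock.
 *
 *	static enum lru_status my_object_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock, void *arg)
 *	{
 *		struct list_head *freeable = arg;
 *		struct my_object *obj = container_of(item, struct my_object, lru);
 *
 *		if (!spin_trylock(&obj->lock))
 *			return LRU_SKIP;
 *
 *		list_lru_isolate_move(list, item, freeable);
 *		spin_unlock(&obj->lock);
 *		return LRU_REMOVED;
 *	}
 */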

/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *	with the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
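
/*
 * Example (hypothetical sketch): scan up to 128 items on one node,
 * collecting freeable objects on a local list and disposing of them after
 * the walk, outside the lru lock. Passing a NULL @memcg to target the
 * global (root) list is an assumption of this sketch, and
 * my_cache_dispose_list() is an illustrative helper.
 *
 *	LIST_HEAD(freeable);
 *	unsigned long nr_to_walk = 128;
 *
 *	list_lru_walk_one(&my_cache_lru, nid, NULL, my_object_isolate,
 *			  &freeable, &nr_to_walk);
 *	my_cache_dispose_list(&freeable);
 */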
/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *	with the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}
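
/*
 * Example (hypothetical sketch): the ->scan_objects() counterpart to
 * my_cache_count() above pairs list_lru_shrink_walk() with the isolate
 * callback, then frees the isolated objects outside the lru lock.
 *
 *	static unsigned long my_cache_scan(struct shrinker *shrink,
 *					   struct shrink_control *sc)
 *	{
 *		LIST_HEAD(freeable);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_cache_lru, sc,
 *					     my_object_isolate, &freeable);
 *		my_cache_dispose_list(&freeable);
 *		return freed;
 *	}
 */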

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
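
/*
 * Example (hypothetical sketch): list_lru_walk() is handy at cache teardown
 * time, when every remaining item on every node should be reclaimed before
 * the lru itself is destroyed.
 *
 *	LIST_HEAD(freeable);
 *
 *	list_lru_walk(&my_cache_lru, my_object_isolate, &freeable, ULONG_MAX);
 *	my_cache_dispose_list(&freeable);
 *	list_lru_destroy(&my_cache_lru);
 */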
#endif /* _LRU_LIST_H */