// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
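
/*
 * Every list_lru is kept on the global list_lrus list (under
 * list_lrus_mutex) so that the per-memcg arrays of the memcg-aware
 * ones can be resized (memcg_update_all_list_lrus) and drained
 * (memcg_drain_all_list_lrus) as cgroups come and go.
 */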
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}
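
/*
 * Map an object to the list it lives on: the per-memcg list of the
 * memcg that owns the object when the lru is memcg aware, or the
 * node-global list otherwise.  Optionally reports the memcg via
 * @memcg_ptr.
 */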
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */
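
/*
 * list_lru_add: add an item to an lru list
 * @lru: the lru to add to
 * @item: the item, which must currently be off-list (list_empty())
 *
 * The item goes on the list of the NUMA node backing it and, if the
 * lru is memcg aware, of the memcg that owns it.  Returns true if the
 * item was added, false if it was already on a list.  The caller must
 * keep the item alive for as long as it sits on the lru.
 */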
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
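
/*
 * list_lru_del: the inverse of list_lru_add()
 *
 * Returns true if the item was on a list and has been unlinked, false
 * if it was already off-list.  Both counters (per-list and per-node)
 * are kept in sync under nlru->lock.
 */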
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
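
/*
 * The two isolate helpers below are intended to be called from a walk
 * callback, which runs with nlru->lock held.  They only adjust the
 * per-list count; the walk itself fixes up nlru->nr_items for items
 * the callback reports as LRU_REMOVED.
 */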
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
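
/*
 * A walk callback has the list_lru_walk_cb signature and returns an
 * enum lru_status.  A minimal, purely illustrative sketch (the demo_*
 * names are hypothetical, not part of this file), disposing of idle
 * objects onto a caller-provided list:
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *					    struct list_lru_one *list,
 *					    spinlock_t *lock, void *arg)
 *	{
 *		struct demo_obj *obj = container_of(item, struct demo_obj, lru);
 *		struct list_head *dispose = arg;
 *
 *		if (demo_obj_busy(obj))
 *			return LRU_ROTATE;
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */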
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = READ_ONCE(l->nr_items);
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
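
/*
 * Walk one per-node (and, for memcg_idx >= 0, per-memcg) list, calling
 * @isolate on each item until *nr_to_walk runs out.  The caller holds
 * nlru->lock; the callback may drop it (LRU_REMOVED_RETRY, LRU_RETRY),
 * in which case the traversal restarts from the head of the list.
 */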
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}
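
/*
 * Walk a single per-memcg list under the node lock.  The _irq variant
 * below does the same with interrupts disabled, for lrus whose lock is
 * also taken from interrupt context.
 */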
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}
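
/*
 * Walk everything on one node: the node-global list first, then each
 * per-memcg list until *nr_to_walk is exhausted.  With many cgroups
 * this can be a long walk; the node lock is taken and released once
 * per list, not held across the whole loop.
 */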
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}
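
/*
 * When kmem accounting is enabled, each node additionally carries an
 * RCU-protected array of per-memcg lists, indexed by kmemcg id.  The
 * helpers below allocate that array, grow it when memcg_nr_cache_ids
 * grows, and free it again.
 */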
#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}
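
/*
 * Allocate and publish the per-memcg array for one node.  Sketch note:
 * RCU_INIT_POINTER() is sufficient here because the nlru is not yet
 * visible to readers at init time, so no release barrier is needed.
 */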
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it.  So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}
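
/*
 * Growing the array follows copy + publish: allocate a larger array,
 * copy the old pointers over, publish it with rcu_assign_pointer()
 * under the node lock, then free the old array after a grace period
 * with kvfree_rcu().  Readers thus see either the old or the new
 * array, and both are valid at all times.
 */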
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	kvfree_rcu(old, rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
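
/*
 * When a memcg is taken offline, its lists are spliced into those of
 * the destination memcg (its parent), so the objects it charged remain
 * reclaimable.  The shrinker bit travels with any moved items.
 */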
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
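
/*
 * __list_lru_init() is normally reached through the list_lru_init*()
 * wrappers in <linux/list_lru.h>.  A purely illustrative sketch of a
 * memcg-aware user (the demo_* names are hypothetical, not part of
 * this file):
 *
 *	static struct list_lru demo_lru;
 *
 *	err = list_lru_init_memcg(&demo_lru, &demo_shrinker);
 *	if (!err) {
 *		...
 *		list_lru_destroy(&demo_lru);
 *	}
 */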
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
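
/*
 * Tear down an lru and release its per-node (and per-memcg) state.
 * Safe to call on an lru that was never initialized, or has already
 * been destroyed, thanks to the lru->node check below.
 */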
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);