git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - mm/list_lru.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
        return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * Either lock or RCU protects the array of per-cgroup lists
         * from relocation (see memcg_update_list_lru_node).
         */
        memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
                                           lockdep_is_held(&nlru->lock));
        if (memcg_lrus && idx >= 0)
                return memcg_lrus->lru[idx];
        return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        struct list_lru_one *l = &nlru->lru;
        struct mem_cgroup *memcg = NULL;

        if (!nlru->memcg_lrus)
                goto out;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                goto out;

        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
        if (memcg_ptr)
                *memcg_ptr = memcg;
        return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
        return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        if (memcg_ptr)
                *memcg_ptr = NULL;
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct mem_cgroup *memcg;
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, &memcg);
                list_add_tail(item, &l->list);
                /* Set shrinker bit if the first element was added */
                if (!l->nr_items++)
                        memcg_set_shrinker_bit(memcg, nid,
                                               lru_shrinker_id(lru));
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, NULL);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        rcu_read_lock();
        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
        count = l->nr_items;
        rcu_read_unlock();

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }
        return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock(&nlru->lock);
        ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
                                  nr_to_walk);
        spin_unlock(&nlru->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                      list_lru_walk_cb isolate, void *cb_arg,
                      unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock_irq(&nlru->lock);
        ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
                                  nr_to_walk);
        spin_unlock_irq(&nlru->lock);
        return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
                                      nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        struct list_lru_node *nlru = &lru->node[nid];

                        spin_lock(&nlru->lock);
                        isolated += __list_lru_walk_one(nlru, memcg_idx,
                                                        isolate, cb_arg,
                                                        nr_to_walk);
                        spin_unlock(&nlru->lock);

                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        int size = memcg_nr_cache_ids;

        memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
                              size * sizeof(void *), GFP_KERNEL);
        if (!memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
                kvfree(memcg_lrus);
                return -ENOMEM;
        }
        RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * This is called when the shrinker has already been unregistered,
         * and nobody can use it. So, there is no need to use kvfree_rcu().
         */
        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
        __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
        kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
        struct list_lru_memcg *mlru;

        mlru = container_of(head, struct list_lru_memcg, rcu);
        kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = rcu_dereference_protected(nlru->memcg_lrus,
                                        lockdep_is_held(&list_lrus_mutex));
        new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kvfree(new);
                return -ENOMEM;
        }

        memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

        /*
         * The locking below allows readers that hold nlru->lock to avoid
         * taking rcu_read_lock (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        rcu_assign_pointer(nlru->memcg_lrus, new);
        spin_unlock_irq(&nlru->lock);

        call_rcu(&old->rcu, kvfree_rcu);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        struct list_lru_memcg *memcg_lrus;

        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
                                               lockdep_is_held(&list_lrus_mutex));
        /* do not bother shrinking the array back to the old size, because we
         * cannot handle allocation failures here */
        __memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        lru->memcg_aware = memcg_aware;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}

static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
                                      int src_idx, struct mem_cgroup *dst_memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        int dst_idx = dst_memcg->kmemcg_id;
        struct list_lru_one *src, *dst;
        bool set;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        set = (!dst->nr_items && src->nr_items);
        dst->nr_items += src->nr_items;
        if (set)
                memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, struct mem_cgroup *dst_memcg)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_memcg);
        mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key, struct shrinker *shrinker)
{
        int i;
        int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
        if (shrinker)
                lru->shrinker_id = shrinker->id;
        else
                lru->shrinker_id = -1;
#endif
        memcg_get_cache_ids();

        lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
        lru->shrinker_id = -1;
#endif
        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);