/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}
static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * on systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}
#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}
static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
	return -ENOMEM;
}
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}
static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;

	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}
static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}
static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}
static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}
static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}
static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);