/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
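/*
 * Every memcg-aware list_lru is kept on this global list so that the memcg
 * code can resize the per-memcg arrays when new cache ids are allocated and
 * drain them when a cgroup goes offline (see memcg_update_all_list_lrus()
 * and memcg_drain_all_list_lrus() below).
 */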
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to always be present, even
	 * on systems supporting sparse NUMA ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per-cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

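/*
 * Items are added to and deleted from the list that matches both the NUMA
 * node and, for memcg-aware LRUs, the memcg of the slab object embedding
 * @item; both are derived from the object's page, so callers pass only the
 * list_head. The return value says whether the item actually changed state:
 * adding an item that is already on a list, or deleting one that is not, is
 * a no-op that returns false. A purely illustrative caller (hypothetical
 * names, not part of this file):
 *
 *	if (list_lru_add(&sb->s_object_lru, &obj->lru))
 *		nr_cached++;
 */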
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

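/*
 * The two isolation helpers below are intended to be called from a
 * list_lru_walk_cb callback, which already holds the node's lru lock.
 * list_lru_isolate() just unlinks the item; list_lru_isolate_move() parks it
 * on a caller-private list (typically a dispose list processed after the
 * walk). Both update nr_items of the list being walked.
 */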
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

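/*
 * Counting takes the node's lru lock once per list read, so the node-wide
 * count below is a sum of individually consistent snapshots rather than one
 * atomic reading across all memcgs.
 */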
static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

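/*
 * __list_lru_walk_one() below drives a caller-supplied list_lru_walk_cb over
 * a single list under nlru->lock. The callback reports what it did with each
 * item: LRU_REMOVED (isolated, lock never dropped), LRU_REMOVED_RETRY
 * (isolated, but the lock was dropped and retaken, so traversal must
 * restart), LRU_ROTATE (the walker moves the item to the tail), LRU_SKIP
 * (left alone), or LRU_RETRY (lock dropped, item kept, restart). A minimal
 * sketch of a callback, using hypothetical names not found in this file:
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *			struct list_lru_one *lru, spinlock_t *lock, void *arg)
 *	{
 *		struct list_head *freeable = arg;
 *
 *		list_lru_isolate_move(lru, item, freeable);
 *		return LRU_REMOVED;
 *	}
 */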
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

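/*
 * For a memcg-aware LRU, each node keeps an array of list_lru_one pointers
 * indexed by memcg cache id (nlru->memcg_lrus->lru[idx]). The helpers below
 * allocate that array, grow it when new cache ids appear, and tear it down.
 * The array pointer is only ever replaced under the node's lru lock, which
 * is exactly what list_lru_from_memcg_idx() relies on.
 */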
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* free exactly the entries that were initialized, i.e. [begin, i) */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

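/*
 * Entry point for growing every registered LRU when the memcg cache id
 * space is extended. On failure, the LRUs that have already been grown are
 * walked in reverse and only the freshly added slots ([old_size, new_size))
 * are freed again, so the resize is all-or-nothing for the caller.
 */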
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

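/*
 * Entry point used when a memcg is taken offline: every item on its
 * per-memcg lists is spliced onto the corresponding lists of the
 * destination index (in practice the parent cgroup), so nothing is left
 * stranded on lists that would never be reclaimed from again.
 */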
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
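/*
 * Callers normally reach __list_lru_init() through the wrappers in
 * <linux/list_lru.h> (list_lru_init(), list_lru_init_key(),
 * list_lru_init_memcg()), which fill in memcg_aware and the lockdep key.
 * A minimal, illustrative lifecycle (demo_lru is a hypothetical name):
 *
 *	static struct list_lru demo_lru;
 *
 *	err = list_lru_init(&demo_lru);
 *	if (err)
 *		return err;
 *	...
 *	list_lru_destroy(&demo_lru);
 *
 * The memcg_get_cache_ids()/memcg_put_cache_ids() pair above pins
 * memcg_nr_cache_ids, so the id array cannot be resized while the LRU is
 * initialized but not yet visible on the list_lrus list.
 */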

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);