/*
 * mm/list_lru.c
 *
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        /*
         * This relies on node 0 always being present, even on
         * systems that support sparse NUMA ids.
         */
        return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        /*
         * The lock protects the array of per cgroup lists from relocation
         * (see memcg_update_list_lru_node).
         */
        lockdep_assert_held(&nlru->lock);
        if (nlru->memcg_lrus && idx >= 0)
                return nlru->memcg_lrus->lru[idx];

        return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        struct mem_cgroup *memcg;

        if (!nlru->memcg_lrus)
                return &nlru->lru;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                return &nlru->lru;

        return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

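/*
 * list_lru_add - add an item to a per-node (and per-memcg) LRU list
 *
 * The list is chosen from the NUMA node the object was allocated on and,
 * on memcg-aware lrus, from the cgroup the object is charged to. Returns
 * true if the item was added, or false if it was already on a list (its
 * list_head was not empty).
 */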
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

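/*
 * list_lru_del - delete an item from its LRU list
 *
 * Returns true if the item was removed, or false if it was not on a list.
 * The item's list_head is reinitialised, so a later list_lru_add() will
 * see it as unlinked.
 */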
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

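/*
 * The two isolation helpers below are meant to be called from a walk
 * callback (see __list_lru_walk_one) with the node's lru lock held: they
 * unlink (or move) the item and fix up the per-list item count, but
 * deliberately do not touch nlru->nr_items, which the walker adjusts
 * itself when the callback reports LRU_REMOVED*.
 */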
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

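/*
 * Counting helpers. list_lru_count_one() takes the node lock so it reads
 * a consistent per-list count; list_lru_count_node() just reads the
 * per-node total, which add/del/walk keep up to date.
 */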
static unsigned long __list_lru_count_one(struct list_lru *lru,
                                          int nid, int memcg_idx)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
        count = l->nr_items;
        spin_unlock(&nlru->lock);

        return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

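/*
 * Walk one per-node (and, when memcg_idx >= 0, per-memcg) list, invoking
 * @isolate on each item with the node's lru lock held. The callback may
 * drop that lock (e.g. to free an object), but must then reacquire it and
 * return LRU_REMOVED_RETRY or LRU_RETRY, since dropping the lock
 * invalidates the traversal and forces a restart.
 */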
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * Decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items.
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
                                   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
                                        nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        isolated += __list_lru_walk_one(lru, nid, memcg_idx,
                                                isolate, cb_arg, nr_to_walk);
                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
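/*
 * On memcg-aware lrus, each node carries an array of per-memcg lists
 * (nlru->memcg_lrus), indexed by memcg_cache_id() and sized to
 * memcg_nr_cache_ids. The helpers below allocate, resize and free that
 * array; resizing is what list_lru_from_memcg_idx() takes the node lock
 * against.
 */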
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        /* Entries [begin, i) were initialised and must all be freed. */
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        int size = memcg_nr_cache_ids;

        nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL);
        if (!nlru->memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
                kvfree(nlru->memcg_lrus);
                return -ENOMEM;
        }

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
        kvfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = nlru->memcg_lrus;
        new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kvfree(new);
                return -ENOMEM;
        }

        memcpy(new, old, old_size * sizeof(void *));

        /*
         * The lock guarantees that we won't race with a reader
         * (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        nlru->memcg_lrus = new;
        spin_unlock_irq(&nlru->lock);

        kvfree(old);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        /*
         * Do not bother shrinking the array back to the old size, because
         * we cannot handle allocation failures here.
         */
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}

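/*
 * Grow the per-memcg arrays of every registered lru to @new_size. On
 * failure, roll back the lrus that were already updated so that all
 * arrays stay at the old size.
 */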
int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}

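/*
 * Splice all items from the list at @src_idx onto the list at @dst_idx,
 * e.g. so that a dying cgroup's objects are handed over to its parent
 * rather than left on an unreachable list.
 */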
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
                                      int src_idx, int dst_idx)
{
        struct list_lru_one *src, *dst;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        dst->nr_items += src->nr_items;
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, int dst_idx)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_idx);
        mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

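/*
 * Initialise an lru: allocate one list_lru_node per NUMA node, set up the
 * locks (with an optional lockdep class for callers that nest lru locks),
 * attach the per-memcg arrays when @memcg_aware, and register the lru so
 * it is covered by memcg id-space resizing. memcg_get_cache_ids() keeps
 * memcg_nr_cache_ids stable while those arrays are sized.
 */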
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;

        memcg_get_cache_ids();

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
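
/*
 * Minimal usage sketch (illustrative only, not part of this file; the
 * "demo_*" names are made up). A typical cache embeds a list_head in its
 * objects, adds them to a list_lru when they become idle, and reclaims
 * through a walk callback:
 *
 *	struct demo_object {
 *		struct list_head lru;	// linked into the list_lru
 *		// ... cache payload ...
 *	};
 *
 *	static struct list_lru demo_lru;
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *			struct list_lru_one *l, spinlock_t *lock, void *arg)
 *	{
 *		struct demo_object *obj =
 *			container_of(item, struct demo_object, lru);
 *
 *		// Unlink under the lru lock, then tell the walker about it;
 *		// kfree() is safe here because it never sleeps.
 *		list_lru_isolate(l, item);
 *		kfree(obj);
 *		return LRU_REMOVED;
 *	}
 *
 *	// list_lru_init(&demo_lru);			// see list_lru.h
 *	// list_lru_add(&demo_lru, &obj->lru);		// when obj goes idle
 *	// nr_to_walk = 32;
 *	// list_lru_walk_node(&demo_lru, nid, demo_isolate,
 *	//		      NULL, &nr_to_walk);
 *	// list_lru_destroy(&demo_lru);
 */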