/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c.
 * These two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_RSS_HUGE,       /* # of pages charged as anon huge */
        MEM_CGROUP_STAT_FILE_MAPPED,    /* # of pages charged as file rss */
        MEM_CGROUP_STAT_WRITEBACK,      /* # of pages under writeback */
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

enum mem_cgroup_events_index {
        MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
        MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
        MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
        MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
        MEM_CGROUP_EVENTS_NSTATS,
        /* default hierarchy events */
        MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_NR_EVENTS,
};

#ifdef CONFIG_MEMCG
void mem_cgroup_events(struct mem_cgroup *memcg,
                       enum mem_cgroup_events_index idx,
                       unsigned int nr);

bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

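/*
 * Illustrative usage (a sketch, not part of the original header): direct
 * reclaim can use mem_cgroup_low() to skip groups still below their low
 * protection limit, and record a MEMCG_LOW event when it must reclaim from
 * them anyway:
 *
 *      if (mem_cgroup_low(root, memcg)) {
 *              if (!force_reclaim)             // hypothetical flag
 *                      continue;
 *              mem_cgroup_events(memcg, MEMCG_LOW, 1);
 *      }
 */
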
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                          gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
                              bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
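
/*
 * Illustrative usage (a sketch, not part of the original header): a page
 * is charged in two phases, with cancel as the failure path:
 *
 *      struct mem_cgroup *memcg;
 *
 *      if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg))
 *              return -ENOMEM;
 *      if (add_page_to_target(page)) {         // hypothetical insertion step
 *              mem_cgroup_cancel_charge(page, memcg);
 *              return -ENOMEM;
 *      }
 *      mem_cgroup_commit_charge(page, memcg, false);
 */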

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
                        bool lrucare);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                              struct mem_cgroup *root);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

/*
 * Returns true if @mm's owner task belongs to @memcg or to one of its
 * descendants.
 */
static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

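/*
 * Illustrative usage (a sketch, not part of the original header): a full
 * walk of the hierarchy under @root, with mem_cgroup_iter_break() as the
 * early exit:
 *
 *      struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *      do {
 *              if (should_stop(iter)) {        // hypothetical predicate
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      } while ((iter = mem_cgroup_iter(root, iter, NULL)));
 */
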
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                      struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
        WARN_ON(current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
        WARN_ON(!current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);

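/*
 * Illustrative usage (a sketch of a fault path, not part of the original
 * header): charges that hit the OOM path inside the fault handler only
 * record the OOM context; the end of the fault then synchronizes on it:
 *
 *      mem_cgroup_oom_enable();
 *      ret = handle_mm_fault(mm, vma, address, flags);
 *      mem_cgroup_oom_disable();
 *      if (task_in_memcg_oom(current))
 *              mem_cgroup_oom_synchronize(true);
 */
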
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (memory_cgrp_subsys.disabled)
                return true;
        return false;
}

struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
                                 enum mem_cgroup_stat_index idx, int val);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(memcg, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(memcg, idx, -1);
}

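/*
 * Illustrative usage (a sketch, not part of the original header): a page
 * state update is bracketed by begin/end so the page cannot move to another
 * memcg while the statistic is being updated:
 *
 *      struct mem_cgroup *memcg;
 *
 *      memcg = mem_cgroup_begin_page_stat(page);
 *      if (page_became_unmapped)               // hypothetical condition
 *              mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 *      mem_cgroup_end_page_stat(memcg);
 */
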
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_count_vm_event(mm, idx);
}
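
/*
 * Illustrative usage (not part of the original header): fault accounting
 * from the page fault path, e.g.:
 *
 *      mem_cgroup_count_vm_event(mm, PGFAULT);
 */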
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline void mem_cgroup_events(struct mem_cgroup *memcg,
                                     enum mem_cgroup_events_index idx,
                                     unsigned int nr)
{
}

static inline bool mem_cgroup_low(struct mem_cgroup *root,
                                  struct mem_cgroup *memcg)
{
        return false;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask,
                                        struct mem_cgroup **memcgp)
{
        *memcgp = NULL;
        return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
                                            struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *oldpage,
                                      struct page *newpage,
                                      bool lrucare)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
                                      const struct mem_cgroup *memcg)
{
        return true;
}

static inline struct cgroup_subsys_state
        *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
{
        return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                           int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
        return NULL;
}

static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

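/*
 * Illustrative usage (a sketch, not part of the original header): visiting
 * every per-memcg child cache of a root cache, under slab_mutex:
 *
 *      int i;
 *      struct kmem_cache *c;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache_index(i) {
 *              c = cache_from_memcg_idx(root_cache, i);  // helper in mm/slab.h
 *              if (!c)
 *                      continue;
 *              // ... operate on c ...
 *      }
 *      mutex_unlock(&slab_mutex);
 */
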
static inline bool memcg_kmem_enabled(void)
{
        return static_key_false(&memcg_kmem_enabled_key);
}

bool memcg_kmem_is_active(struct mem_cgroup *memcg);

/*
 * In general, we'll do everything in our power not to incur any overhead
 * for non-memcg users of the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
                                 int order);
void __memcg_kmem_commit_charge(struct page *page,
                                struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

void memcg_update_array_size(int num_groups);

struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);

int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
                      unsigned long nr_pages);
void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        if (!memcg_kmem_enabled())
                return true;

        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and have this allocation
         * unaccounted. We could in theory charge it forcibly, but we hope
         * those allocations are rare, and won't be worth the trouble.
         */
        if (gfp & __GFP_NOFAIL)
                return true;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return true;

        /* If the task is dying, just let it go. */
        if (unlikely(fatal_signal_pending(current)))
                return true;

        return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit @page to @memcg.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
        if (memcg_kmem_enabled() && memcg)
                __memcg_kmem_commit_charge(page, memcg, order);
}

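/*
 * Illustrative usage (a sketch of an allocator path, not part of the
 * original header): charge first, then allocate, then commit the result;
 * a NULL @page reverts the charge:
 *
 *      struct mem_cgroup *memcg;
 *      struct page *page;
 *
 *      if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *              return NULL;
 *      page = alloc_pages(gfp_mask, order);
 *      memcg_kmem_commit_charge(page, memcg, order);
 *      return page;
 */
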
/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        if (!memcg_kmem_enabled())
                return cachep;
        if (gfp & __GFP_NOFAIL)
                return cachep;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return cachep;
        if (unlikely(fatal_signal_pending(current)))
                return cachep;

        return __memcg_kmem_get_cache(cachep);
}

static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_put_cache(cachep);
}
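
/*
 * Illustrative usage (a sketch of a slab allocation path, not part of the
 * original header): swap in the per-memcg cache around the allocation and
 * drop the reference afterwards:
 *
 *      struct kmem_cache *s = memcg_kmem_get_cache(cachep, gfp_mask);
 *      void *obj = do_slab_alloc(s, gfp_mask); // stand-in for the allocator
 *      memcg_kmem_put_cache(s);
 *      return obj;
 */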
#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
        return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        return cachep;
}

static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */