/*
 * memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openv
z.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <asm/uaccess.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* remembers the boot option */
#else
#define do_swap_account		(0)
#endif

static DEFINE_MUTEX(memcg_tasklist);	/* can be held under cgroup_mutex */
#define SOFTLIMIT_EVENTS_THRESH (1000)
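/*
 * Note (illustrative summary, based on the code below): the charge/uncharge
 * paths bump MEM_CGROUP_STAT_EVENTS on the local cpu; once a cpu's counter
 * passes SOFTLIMIT_EVENTS_THRESH, mem_cgroup_soft_limit_check() resets it and
 * the caller refreshes this memcg's position in the soft limit RB-tree via
 * mem_cgroup_update_tree().
 */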
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
	MEM_CGROUP_STAT_EVENTS,	/* sum of pagein + pageout for internal use */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[0];
};
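/*
 * Note (illustrative): cpustat[0] is the zero-length-array idiom; the array is
 * assumed to be sized for every possible cpu when the embedding struct
 * mem_cgroup is allocated, so readers iterate with for_each_possible_cpu()
 * as mem_cgroup_read_stat() does below.
 */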
static inline void
__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
				enum mem_cgroup_stat_index idx)
{
	stat->count[idx] = 0;
}

static inline s64
__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
				enum mem_cgroup_stat_index idx)
{
	return stat->count[idx];
}

/*
 * For accounting under irq disable, no need to increment the preempt count.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;

	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
{
	s64 ret;

	ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
	ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
	return ret;
}
/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
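/*
 * Example (illustrative): MEM_CGROUP_ZSTAT(mz, LRU_INACTIVE_ANON) expands to
 * mz->count[LRU_INACTIVE_ANON], i.e. the number of pages this memcg has on
 * that per-zone LRU list.
 */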
struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. May be even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark, this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	/*
	 * protect against reclaim related members.
	 */
	spinlock_t reclaim_param_lock;

	int	prev_priority;	/* for recording reclaim priority */

	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from.
	 */
	int last_scanned_child;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long	last_oom_jiffies;

	unsigned int	swappiness;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/*
	 * statistics. This must be placed at the end of memcg.
	 */
	struct mem_cgroup_stat stat;
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
};
/* only for here (for easy reading.) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_LOCK	(1UL << PCG_LOCK)
/* Not used, but added here for completeness */
#define PCGF_ACCT	(1UL << PCG_ACCT)

/* for encoding cft->private value on file */
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
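/*
 * Example (illustrative, hypothetical values): MEMFILE_PRIVATE(1, 2) packs to
 * 0x10002; MEMFILE_TYPE(0x10002) == 1 (counter type, high 16 bits) and
 * MEMFILE_ATTR(0x10002) == 2 (res_counter member, low 16 bits).
 */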
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
#define MEM_CGROUP_RECLAIM_SOFT	(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
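/*
 * Example (illustrative): callers or-together these flags, e.g.
 *	mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
 *		MEM_CGROUP_RECLAIM_NOSWAP | MEM_CGROUP_RECLAIM_SHRINK);
 * and the reclaim path tests them with reclaim_options & MEM_CGROUP_RECLAIM_*.
 */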
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(void);
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}
static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}
static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}
370 static bool mem_cgroup_soft_limit_check(struct mem_cgroup
*mem
)
375 struct mem_cgroup_stat_cpu
*cpustat
;
378 cpustat
= &mem
->stat
.cpustat
[cpu
];
379 val
= __mem_cgroup_stat_read_local(cpustat
, MEM_CGROUP_STAT_EVENTS
);
380 if (unlikely(val
> SOFTLIMIT_EVENTS_THRESH
)) {
381 __mem_cgroup_stat_reset_safe(cpustat
, MEM_CGROUP_STAT_EVENTS
);
388 static void mem_cgroup_update_tree(struct mem_cgroup
*mem
, struct page
*page
)
390 unsigned long long excess
;
391 struct mem_cgroup_per_zone
*mz
;
392 struct mem_cgroup_tree_per_zone
*mctz
;
393 int nid
= page_to_nid(page
);
394 int zid
= page_zonenum(page
);
395 mctz
= soft_limit_tree_from_page(page
);
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counters are not touched.
401 for (; mem
; mem
= parent_mem_cgroup(mem
)) {
402 mz
= mem_cgroup_zoneinfo(mem
, nid
, zid
);
403 excess
= res_counter_soft_limit_excess(&mem
->res
);
405 * We have to update the tree if mz is on RB-tree or
406 * mem is over its softlimit.
408 if (excess
|| mz
->on_tree
) {
409 spin_lock(&mctz
->lock
);
410 /* if on-tree, remove it */
412 __mem_cgroup_remove_exceeded(mem
, mz
, mctz
);
414 * Insert again. mz->usage_in_excess will be updated.
415 * If excess is 0, no tree ops.
417 __mem_cgroup_insert_exceeded(mem
, mz
, mctz
, excess
);
418 spin_unlock(&mctz
->lock
);
423 static void mem_cgroup_remove_from_trees(struct mem_cgroup
*mem
)
426 struct mem_cgroup_per_zone
*mz
;
427 struct mem_cgroup_tree_per_zone
*mctz
;
429 for_each_node_state(node
, N_POSSIBLE
) {
430 for (zone
= 0; zone
< MAX_NR_ZONES
; zone
++) {
431 mz
= mem_cgroup_zoneinfo(mem
, node
, zone
);
432 mctz
= soft_limit_tree_node_zone(node
, zone
);
433 mem_cgroup_remove_exceeded(mem
, mz
, mctz
);
static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
{
	return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
}
443 static struct mem_cgroup_per_zone
*
444 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone
*mctz
)
446 struct rb_node
*rightmost
= NULL
;
447 struct mem_cgroup_per_zone
*mz
;
451 rightmost
= rb_last(&mctz
->rb_root
);
453 goto done
; /* Nothing to reclaim from */
455 mz
= rb_entry(rightmost
, struct mem_cgroup_per_zone
, tree_node
);
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
461 __mem_cgroup_remove_exceeded(mz
->mem
, mz
, mctz
);
462 if (!res_counter_soft_limit_excess(&mz
->mem
->res
) ||
463 !css_tryget(&mz
->mem
->css
))
469 static struct mem_cgroup_per_zone
*
470 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone
*mctz
)
472 struct mem_cgroup_per_zone
*mz
;
474 spin_lock(&mctz
->lock
);
475 mz
= __mem_cgroup_largest_soft_limit_node(mctz
);
476 spin_unlock(&mctz
->lock
);
480 static void mem_cgroup_swap_statistics(struct mem_cgroup
*mem
,
483 int val
= (charge
) ? 1 : -1;
484 struct mem_cgroup_stat
*stat
= &mem
->stat
;
485 struct mem_cgroup_stat_cpu
*cpustat
;
488 cpustat
= &stat
->cpustat
[cpu
];
489 __mem_cgroup_stat_add_safe(cpustat
, MEM_CGROUP_STAT_SWAPOUT
, val
);
493 static void mem_cgroup_charge_statistics(struct mem_cgroup
*mem
,
494 struct page_cgroup
*pc
,
497 int val
= (charge
) ? 1 : -1;
498 struct mem_cgroup_stat
*stat
= &mem
->stat
;
499 struct mem_cgroup_stat_cpu
*cpustat
;
502 cpustat
= &stat
->cpustat
[cpu
];
503 if (PageCgroupCache(pc
))
504 __mem_cgroup_stat_add_safe(cpustat
, MEM_CGROUP_STAT_CACHE
, val
);
506 __mem_cgroup_stat_add_safe(cpustat
, MEM_CGROUP_STAT_RSS
, val
);
509 __mem_cgroup_stat_add_safe(cpustat
,
510 MEM_CGROUP_STAT_PGPGIN_COUNT
, 1);
512 __mem_cgroup_stat_add_safe(cpustat
,
513 MEM_CGROUP_STAT_PGPGOUT_COUNT
, 1);
514 __mem_cgroup_stat_add_safe(cpustat
, MEM_CGROUP_STAT_EVENTS
, 1);
518 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup
*mem
,
522 struct mem_cgroup_per_zone
*mz
;
525 for_each_online_node(nid
)
526 for (zid
= 0; zid
< MAX_NR_ZONES
; zid
++) {
527 mz
= mem_cgroup_zoneinfo(mem
, nid
, zid
);
528 total
+= MEM_CGROUP_ZSTAT(mz
, idx
);
533 static struct mem_cgroup
*mem_cgroup_from_cont(struct cgroup
*cont
)
535 return container_of(cgroup_subsys_state(cont
,
536 mem_cgroup_subsys_id
), struct mem_cgroup
,
540 struct mem_cgroup
*mem_cgroup_from_task(struct task_struct
*p
)
543 * mm_update_next_owner() may clear mm->owner to NULL
544 * if it races with swapoff, page migration, etc.
545 * So this can be called with p == NULL.
550 return container_of(task_subsys_state(p
, mem_cgroup_subsys_id
),
551 struct mem_cgroup
, css
);
554 static struct mem_cgroup
*try_get_mem_cgroup_from_mm(struct mm_struct
*mm
)
556 struct mem_cgroup
*mem
= NULL
;
561 * Because we have no locks, mm->owner's may be being moved to other
562 * cgroup. We use css_tryget() here even if this looks
563 * pessimistic (rather than adding locks here).
567 mem
= mem_cgroup_from_task(rcu_dereference(mm
->owner
));
570 } while (!css_tryget(&mem
->css
));
576 * Call callback function against all cgroup under hierarchy tree.
578 static int mem_cgroup_walk_tree(struct mem_cgroup
*root
, void *data
,
579 int (*func
)(struct mem_cgroup
*, void *))
581 int found
, ret
, nextid
;
582 struct cgroup_subsys_state
*css
;
583 struct mem_cgroup
*mem
;
585 if (!root
->use_hierarchy
)
586 return (*func
)(root
, data
);
594 css
= css_get_next(&mem_cgroup_subsys
, nextid
, &root
->css
,
596 if (css
&& css_tryget(css
))
597 mem
= container_of(css
, struct mem_cgroup
, css
);
601 ret
= (*func
)(mem
, data
);
605 } while (!ret
&& css
);
610 static inline bool mem_cgroup_is_root(struct mem_cgroup
*mem
)
612 return (mem
== root_mem_cgroup
);
616 * Following LRU functions are allowed to be used without PCG_LOCK.
617 * Operations are called by routine of global LRU independently from memcg.
618 * What we have to take care of here is validness of pc->mem_cgroup.
 * Changes to pc->mem_cgroup happen when
 * In the typical case, "charge" is done before add-to-lru. Exception is SwapCache.
624 * It is added to LRU before charge.
625 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
626 * When moving account, the page is not on LRU. It's isolated.
629 void mem_cgroup_del_lru_list(struct page
*page
, enum lru_list lru
)
631 struct page_cgroup
*pc
;
632 struct mem_cgroup_per_zone
*mz
;
634 if (mem_cgroup_disabled())
636 pc
= lookup_page_cgroup(page
);
637 /* can happen while we handle swapcache. */
638 if (!TestClearPageCgroupAcctLRU(pc
))
640 VM_BUG_ON(!pc
->mem_cgroup
);
642 * We don't check PCG_USED bit. It's cleared when the "page" is finally
643 * removed from global LRU.
645 mz
= page_cgroup_zoneinfo(pc
);
646 MEM_CGROUP_ZSTAT(mz
, lru
) -= 1;
647 if (mem_cgroup_is_root(pc
->mem_cgroup
))
649 VM_BUG_ON(list_empty(&pc
->lru
));
650 list_del_init(&pc
->lru
);
654 void mem_cgroup_del_lru(struct page
*page
)
656 mem_cgroup_del_lru_list(page
, page_lru(page
));
659 void mem_cgroup_rotate_lru_list(struct page
*page
, enum lru_list lru
)
661 struct mem_cgroup_per_zone
*mz
;
662 struct page_cgroup
*pc
;
664 if (mem_cgroup_disabled())
667 pc
= lookup_page_cgroup(page
);
669 * Used bit is set without atomic ops but after smp_wmb().
670 * For making pc->mem_cgroup visible, insert smp_rmb() here.
673 /* unused or root page is not rotated. */
674 if (!PageCgroupUsed(pc
) || mem_cgroup_is_root(pc
->mem_cgroup
))
676 mz
= page_cgroup_zoneinfo(pc
);
677 list_move(&pc
->lru
, &mz
->lists
[lru
]);
680 void mem_cgroup_add_lru_list(struct page
*page
, enum lru_list lru
)
682 struct page_cgroup
*pc
;
683 struct mem_cgroup_per_zone
*mz
;
685 if (mem_cgroup_disabled())
687 pc
= lookup_page_cgroup(page
);
688 VM_BUG_ON(PageCgroupAcctLRU(pc
));
690 * Used bit is set without atomic ops but after smp_wmb().
691 * For making pc->mem_cgroup visible, insert smp_rmb() here.
694 if (!PageCgroupUsed(pc
))
697 mz
= page_cgroup_zoneinfo(pc
);
698 MEM_CGROUP_ZSTAT(mz
, lru
) += 1;
699 SetPageCgroupAcctLRU(pc
);
700 if (mem_cgroup_is_root(pc
->mem_cgroup
))
702 list_add(&pc
->lru
, &mz
->lists
[lru
]);
 * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to
 * lru because the page may be reused after it's fully uncharged (because of
 * SwapCache behavior). To handle that, unlink page_cgroup from LRU when charge
709 * it again. This function is only used to charge SwapCache. It's done under
710 * lock_page and expected that zone->lru_lock is never held.
712 static void mem_cgroup_lru_del_before_commit_swapcache(struct page
*page
)
715 struct zone
*zone
= page_zone(page
);
716 struct page_cgroup
*pc
= lookup_page_cgroup(page
);
718 spin_lock_irqsave(&zone
->lru_lock
, flags
);
720 * Forget old LRU when this page_cgroup is *not* used. This Used bit
721 * is guarded by lock_page() because the page is SwapCache.
723 if (!PageCgroupUsed(pc
))
724 mem_cgroup_del_lru_list(page
, page_lru(page
));
725 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
728 static void mem_cgroup_lru_add_after_commit_swapcache(struct page
*page
)
731 struct zone
*zone
= page_zone(page
);
732 struct page_cgroup
*pc
= lookup_page_cgroup(page
);
734 spin_lock_irqsave(&zone
->lru_lock
, flags
);
735 /* link when the page is linked to LRU but page_cgroup isn't */
736 if (PageLRU(page
) && !PageCgroupAcctLRU(pc
))
737 mem_cgroup_add_lru_list(page
, page_lru(page
));
738 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
742 void mem_cgroup_move_lists(struct page
*page
,
743 enum lru_list from
, enum lru_list to
)
745 if (mem_cgroup_disabled())
747 mem_cgroup_del_lru_list(page
, from
);
748 mem_cgroup_add_lru_list(page
, to
);
751 int task_in_mem_cgroup(struct task_struct
*task
, const struct mem_cgroup
*mem
)
754 struct mem_cgroup
*curr
= NULL
;
758 curr
= try_get_mem_cgroup_from_mm(task
->mm
);
763 if (curr
->use_hierarchy
)
764 ret
= css_is_ancestor(&curr
->css
, &mem
->css
);
772 * prev_priority control...this will be used in memory reclaim path.
774 int mem_cgroup_get_reclaim_priority(struct mem_cgroup
*mem
)
778 spin_lock(&mem
->reclaim_param_lock
);
779 prev_priority
= mem
->prev_priority
;
780 spin_unlock(&mem
->reclaim_param_lock
);
782 return prev_priority
;
785 void mem_cgroup_note_reclaim_priority(struct mem_cgroup
*mem
, int priority
)
787 spin_lock(&mem
->reclaim_param_lock
);
788 if (priority
< mem
->prev_priority
)
789 mem
->prev_priority
= priority
;
790 spin_unlock(&mem
->reclaim_param_lock
);
793 void mem_cgroup_record_reclaim_priority(struct mem_cgroup
*mem
, int priority
)
795 spin_lock(&mem
->reclaim_param_lock
);
796 mem
->prev_priority
= priority
;
797 spin_unlock(&mem
->reclaim_param_lock
);
static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long gb;
	unsigned long inactive_ratio;

	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	if (present_pages) {
		present_pages[0] = inactive;
		present_pages[1] = active;
	}

	return inactive_ratio;
}
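/*
 * Worked example (illustrative): with 4GB of anon pages and 4KB pages, gb == 4
 * and inactive_ratio == int_sqrt(40) == 6, so mem_cgroup_inactive_anon_is_low()
 * below reports "low" only while inactive * 6 < active.
 */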
824 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup
*memcg
)
826 unsigned long active
;
827 unsigned long inactive
;
828 unsigned long present_pages
[2];
829 unsigned long inactive_ratio
;
831 inactive_ratio
= calc_inactive_ratio(memcg
, present_pages
);
833 inactive
= present_pages
[0];
834 active
= present_pages
[1];
836 if (inactive
* inactive_ratio
< active
)
842 int mem_cgroup_inactive_file_is_low(struct mem_cgroup
*memcg
)
844 unsigned long active
;
845 unsigned long inactive
;
847 inactive
= mem_cgroup_get_local_zonestat(memcg
, LRU_INACTIVE_FILE
);
848 active
= mem_cgroup_get_local_zonestat(memcg
, LRU_ACTIVE_FILE
);
850 return (active
> inactive
);
853 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup
*memcg
,
857 int nid
= zone
->zone_pgdat
->node_id
;
858 int zid
= zone_idx(zone
);
859 struct mem_cgroup_per_zone
*mz
= mem_cgroup_zoneinfo(memcg
, nid
, zid
);
861 return MEM_CGROUP_ZSTAT(mz
, lru
);
864 struct zone_reclaim_stat
*mem_cgroup_get_reclaim_stat(struct mem_cgroup
*memcg
,
867 int nid
= zone
->zone_pgdat
->node_id
;
868 int zid
= zone_idx(zone
);
869 struct mem_cgroup_per_zone
*mz
= mem_cgroup_zoneinfo(memcg
, nid
, zid
);
871 return &mz
->reclaim_stat
;
874 struct zone_reclaim_stat
*
875 mem_cgroup_get_reclaim_stat_from_page(struct page
*page
)
877 struct page_cgroup
*pc
;
878 struct mem_cgroup_per_zone
*mz
;
880 if (mem_cgroup_disabled())
883 pc
= lookup_page_cgroup(page
);
885 * Used bit is set without atomic ops but after smp_wmb().
886 * For making pc->mem_cgroup visible, insert smp_rmb() here.
889 if (!PageCgroupUsed(pc
))
892 mz
= page_cgroup_zoneinfo(pc
);
896 return &mz
->reclaim_stat
;
899 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan
,
900 struct list_head
*dst
,
901 unsigned long *scanned
, int order
,
902 int mode
, struct zone
*z
,
903 struct mem_cgroup
*mem_cont
,
904 int active
, int file
)
906 unsigned long nr_taken
= 0;
910 struct list_head
*src
;
911 struct page_cgroup
*pc
, *tmp
;
912 int nid
= z
->zone_pgdat
->node_id
;
913 int zid
= zone_idx(z
);
914 struct mem_cgroup_per_zone
*mz
;
915 int lru
= LRU_FILE
* file
+ active
;
919 mz
= mem_cgroup_zoneinfo(mem_cont
, nid
, zid
);
920 src
= &mz
->lists
[lru
];
923 list_for_each_entry_safe_reverse(pc
, tmp
, src
, lru
) {
924 if (scan
>= nr_to_scan
)
928 if (unlikely(!PageCgroupUsed(pc
)))
930 if (unlikely(!PageLRU(page
)))
934 ret
= __isolate_lru_page(page
, mode
, file
);
937 list_move(&page
->lru
, dst
);
938 mem_cgroup_del_lru(page
);
942 /* we don't affect global LRU but rotate in our LRU */
943 mem_cgroup_rotate_lru_list(page
, page_lru(page
));
#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)
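/*
 * Example (illustrative): when res_counter_charge() fails and hands back the
 * failing counter in fail_res, mem_cgroup_from_res_counter(fail_res, res)
 * (or ", memsw") uses container_of() to recover the owning struct mem_cgroup,
 * as __mem_cgroup_try_charge() does below.
 */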
957 static bool mem_cgroup_check_under_limit(struct mem_cgroup
*mem
)
959 if (do_swap_account
) {
960 if (res_counter_check_under_limit(&mem
->res
) &&
961 res_counter_check_under_limit(&mem
->memsw
))
964 if (res_counter_check_under_limit(&mem
->res
))
969 static unsigned int get_swappiness(struct mem_cgroup
*memcg
)
971 struct cgroup
*cgrp
= memcg
->css
.cgroup
;
972 unsigned int swappiness
;
975 if (cgrp
->parent
== NULL
)
976 return vm_swappiness
;
978 spin_lock(&memcg
->reclaim_param_lock
);
979 swappiness
= memcg
->swappiness
;
980 spin_unlock(&memcg
->reclaim_param_lock
);
985 static int mem_cgroup_count_children_cb(struct mem_cgroup
*mem
, void *data
)
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
994 * @memcg: The memory cgroup that went over limit
995 * @p: Task that is going to be killed
997 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1000 void mem_cgroup_print_oom_info(struct mem_cgroup
*memcg
, struct task_struct
*p
)
1002 struct cgroup
*task_cgrp
;
1003 struct cgroup
*mem_cgrp
;
1005 * Need a buffer in BSS, can't rely on allocations. The code relies
1006 * on the assumption that OOM is serialized for memory controller.
1007 * If this assumption is broken, revisit this code.
1009 static char memcg_name
[PATH_MAX
];
1018 mem_cgrp
= memcg
->css
.cgroup
;
1019 task_cgrp
= task_cgroup(p
, mem_cgroup_subsys_id
);
1021 ret
= cgroup_path(task_cgrp
, memcg_name
, PATH_MAX
);
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information
1032 printk(KERN_INFO
"Task in %s killed", memcg_name
);
1035 ret
= cgroup_path(mem_cgrp
, memcg_name
, PATH_MAX
);
	 * Continues from above, so we don't need a KERN_ level
1045 printk(KERN_CONT
" as a result of limit of %s\n", memcg_name
);
1048 printk(KERN_INFO
"memory: usage %llukB, limit %llukB, failcnt %llu\n",
1049 res_counter_read_u64(&memcg
->res
, RES_USAGE
) >> 10,
1050 res_counter_read_u64(&memcg
->res
, RES_LIMIT
) >> 10,
1051 res_counter_read_u64(&memcg
->res
, RES_FAILCNT
));
1052 printk(KERN_INFO
"memory+swap: usage %llukB, limit %llukB, "
1054 res_counter_read_u64(&memcg
->memsw
, RES_USAGE
) >> 10,
1055 res_counter_read_u64(&memcg
->memsw
, RES_LIMIT
) >> 10,
1056 res_counter_read_u64(&memcg
->memsw
, RES_FAILCNT
));
1060 * This function returns the number of memcg under hierarchy tree. Returns
1061 * 1(self count) if no children.
1063 static int mem_cgroup_count_children(struct mem_cgroup
*mem
)
1066 mem_cgroup_walk_tree(mem
, &num
, mem_cgroup_count_children_cb
);
1071 * Visit the first child (need not be the first child as per the ordering
1072 * of the cgroup list, since we track last_scanned_child) of @mem and use
1073 * that to reclaim free pages from.
1075 static struct mem_cgroup
*
1076 mem_cgroup_select_victim(struct mem_cgroup
*root_mem
)
1078 struct mem_cgroup
*ret
= NULL
;
1079 struct cgroup_subsys_state
*css
;
1082 if (!root_mem
->use_hierarchy
) {
1083 css_get(&root_mem
->css
);
1089 nextid
= root_mem
->last_scanned_child
+ 1;
1090 css
= css_get_next(&mem_cgroup_subsys
, nextid
, &root_mem
->css
,
1092 if (css
&& css_tryget(css
))
1093 ret
= container_of(css
, struct mem_cgroup
, css
);
1096 /* Updates scanning parameter */
1097 spin_lock(&root_mem
->reclaim_param_lock
);
1099 /* this means start scan from ID:1 */
1100 root_mem
->last_scanned_child
= 0;
1102 root_mem
->last_scanned_child
= found
;
1103 spin_unlock(&root_mem
->reclaim_param_lock
);
 * Scan the hierarchy if needed to reclaim memory. We remember the last child
 * we reclaimed from, so that we don't end up penalizing one child extensively
 * based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 *
 * We give up and return to the caller when we visit root_mem twice.
 * (other groups can be removed while we're walking....)
 *
 * If shrink==true, to avoid freeing too much, this returns immediately.
1121 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup
*root_mem
,
1124 unsigned long reclaim_options
)
1126 struct mem_cgroup
*victim
;
1129 bool noswap
= reclaim_options
& MEM_CGROUP_RECLAIM_NOSWAP
;
1130 bool shrink
= reclaim_options
& MEM_CGROUP_RECLAIM_SHRINK
;
1131 bool check_soft
= reclaim_options
& MEM_CGROUP_RECLAIM_SOFT
;
1132 unsigned long excess
= mem_cgroup_get_excess(root_mem
);
	/* If memsw_is_minimum==1, swap-out is of no use. */
1135 if (root_mem
->memsw_is_minimum
)
1139 victim
= mem_cgroup_select_victim(root_mem
);
1140 if (victim
== root_mem
) {
1143 drain_all_stock_async();
			 * If we have not been able to reclaim
			 * anything, it might be because there are
			 * no reclaimable pages under this hierarchy
1150 if (!check_soft
|| !total
) {
1151 css_put(&victim
->css
);
			 * We want to do more targeted reclaim.
			 * excess >> 2 is not too excessive, so we do not
			 * reclaim too much, nor so little that we keep
			 * coming back to reclaim from this cgroup
1160 if (total
>= (excess
>> 2) ||
1161 (loop
> MEM_CGROUP_MAX_RECLAIM_LOOPS
)) {
1162 css_put(&victim
->css
);
1167 if (!mem_cgroup_local_usage(&victim
->stat
)) {
1168 /* this cgroup's local usage == 0 */
1169 css_put(&victim
->css
);
1172 /* we use swappiness of local cgroup */
1174 ret
= mem_cgroup_shrink_node_zone(victim
, gfp_mask
,
1175 noswap
, get_swappiness(victim
), zone
,
1176 zone
->zone_pgdat
->node_id
);
1178 ret
= try_to_free_mem_cgroup_pages(victim
, gfp_mask
,
1179 noswap
, get_swappiness(victim
));
1180 css_put(&victim
->css
);
		 * At shrinking usage, we can't check whether we should stop here or
		 * reclaim more. It depends on the callers. last_scanned_child
		 * will work well enough for keeping fairness under the tree.
1190 if (res_counter_check_under_soft_limit(&root_mem
->res
))
1192 } else if (mem_cgroup_check_under_limit(root_mem
))
1198 bool mem_cgroup_oom_called(struct task_struct
*task
)
1201 struct mem_cgroup
*mem
;
1202 struct mm_struct
*mm
;
1208 mem
= mem_cgroup_from_task(rcu_dereference(mm
->owner
));
1209 if (mem
&& time_before(jiffies
, mem
->last_oom_jiffies
+ HZ
/10))
1215 static int record_last_oom_cb(struct mem_cgroup
*mem
, void *data
)
1217 mem
->last_oom_jiffies
= jiffies
;
1221 static void record_last_oom(struct mem_cgroup
*mem
)
1223 mem_cgroup_walk_tree(mem
, NULL
, record_last_oom_cb
);
1227 * Currently used to update mapped file statistics, but the routine can be
1228 * generalized to update other statistics as well.
1230 void mem_cgroup_update_file_mapped(struct page
*page
, int val
)
1232 struct mem_cgroup
*mem
;
1233 struct mem_cgroup_stat
*stat
;
1234 struct mem_cgroup_stat_cpu
*cpustat
;
1236 struct page_cgroup
*pc
;
1238 pc
= lookup_page_cgroup(page
);
1242 lock_page_cgroup(pc
);
1243 mem
= pc
->mem_cgroup
;
1247 if (!PageCgroupUsed(pc
))
1251 * Preemption is already disabled, we don't need get_cpu()
1253 cpu
= smp_processor_id();
1255 cpustat
= &stat
->cpustat
[cpu
];
1257 __mem_cgroup_stat_add_safe(cpustat
, MEM_CGROUP_STAT_FILE_MAPPED
, val
);
1259 unlock_page_cgroup(pc
);
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_SIZE	(32 * PAGE_SIZE)
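/*
 * Illustrative flow of the per-cpu stock (see consume_stock()/refill_stock()
 * below): __mem_cgroup_try_charge() charges CHARGE_SIZE (32 pages) to the
 * res_counter at once, uses one page for the faulting page, and parks the
 * remainder in this cpu's memcg_stock. The next 31 charges for the same memcg
 * on this cpu can then be served from the stock without touching the
 * res_counter.
 */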
1267 struct memcg_stock_pcp
{
1268 struct mem_cgroup
*cached
; /* this never be root cgroup */
1270 struct work_struct work
;
1272 static DEFINE_PER_CPU(struct memcg_stock_pcp
, memcg_stock
);
1273 static atomic_t memcg_drain_count
;
1276 * Try to consume stocked charge on this cpu. If success, PAGE_SIZE is consumed
1277 * from local stock and true is returned. If the stock is 0 or charges from a
1278 * cgroup which is not current target, returns false. This stock will be
1281 static bool consume_stock(struct mem_cgroup
*mem
)
1283 struct memcg_stock_pcp
*stock
;
1286 stock
= &get_cpu_var(memcg_stock
);
1287 if (mem
== stock
->cached
&& stock
->charge
)
1288 stock
->charge
-= PAGE_SIZE
;
1289 else /* need to call res_counter_charge */
1291 put_cpu_var(memcg_stock
);
1296 * Returns stocks cached in percpu to res_counter and reset cached information.
1298 static void drain_stock(struct memcg_stock_pcp
*stock
)
1300 struct mem_cgroup
*old
= stock
->cached
;
1302 if (stock
->charge
) {
1303 res_counter_uncharge(&old
->res
, stock
->charge
);
1304 if (do_swap_account
)
1305 res_counter_uncharge(&old
->memsw
, stock
->charge
);
1307 stock
->cached
= NULL
;
 * This must be called with preemption disabled, or by a thread which is
 * pinned to the local cpu.
1315 static void drain_local_stock(struct work_struct
*dummy
)
1317 struct memcg_stock_pcp
*stock
= &__get_cpu_var(memcg_stock
);
 * Cache charges (val), taken from the res_counter, in the local per-cpu area.
 * They will be consumed by consume_stock(), later.
1325 static void refill_stock(struct mem_cgroup
*mem
, int val
)
1327 struct memcg_stock_pcp
*stock
= &get_cpu_var(memcg_stock
);
1329 if (stock
->cached
!= mem
) { /* reset if necessary */
1331 stock
->cached
= mem
;
1333 stock
->charge
+= val
;
1334 put_cpu_var(memcg_stock
);
 * Tries to drain stocked charges on other cpus. This function is asynchronous
 * and just puts a work item per cpu for draining locally on each cpu. Callers
 * can expect some charges to return to the res_counter later but cannot wait
 * for it.
1343 static void drain_all_stock_async(void)
1346 /* This function is for scheduling "drain" in asynchronous way.
1347 * The result of "drain" is not directly handled by callers. Then,
1348 * if someone is calling drain, we don't have to call drain more.
1349 * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
1350 * there is a race. We just do loose check here.
1352 if (atomic_read(&memcg_drain_count
))
1354 /* Notify other cpus that system-wide "drain" is running */
1355 atomic_inc(&memcg_drain_count
);
1357 for_each_online_cpu(cpu
) {
1358 struct memcg_stock_pcp
*stock
= &per_cpu(memcg_stock
, cpu
);
1359 schedule_work_on(cpu
, &stock
->work
);
1362 atomic_dec(&memcg_drain_count
);
1363 /* We don't wait for flush_work */
1366 /* This is a synchronous drain interface. */
1367 static void drain_all_stock_sync(void)
1369 /* called when force_empty is called */
1370 atomic_inc(&memcg_drain_count
);
1371 schedule_on_each_cpu(drain_local_stock
);
1372 atomic_dec(&memcg_drain_count
);
1375 static int __cpuinit
memcg_stock_cpu_callback(struct notifier_block
*nb
,
1376 unsigned long action
,
1379 int cpu
= (unsigned long)hcpu
;
1380 struct memcg_stock_pcp
*stock
;
1382 if (action
!= CPU_DEAD
)
1384 stock
= &per_cpu(memcg_stock
, cpu
);
 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
 * the oom-killer can be invoked.
1393 static int __mem_cgroup_try_charge(struct mm_struct
*mm
,
1394 gfp_t gfp_mask
, struct mem_cgroup
**memcg
,
1395 bool oom
, struct page
*page
)
1397 struct mem_cgroup
*mem
, *mem_over_limit
;
1398 int nr_retries
= MEM_CGROUP_RECLAIM_RETRIES
;
1399 struct res_counter
*fail_res
;
1400 int csize
= CHARGE_SIZE
;
1402 if (unlikely(test_thread_flag(TIF_MEMDIE
))) {
1403 /* Don't account this! */
1409 * We always charge the cgroup the mm_struct belongs to.
1410 * The mm_struct's mem_cgroup changes on task migration if the
1411 * thread group leader migrates. It's possible that mm is not
1412 * set, if so charge the init_mm (happens for pagecache usage).
1416 mem
= try_get_mem_cgroup_from_mm(mm
);
1424 VM_BUG_ON(css_is_removed(&mem
->css
));
1425 if (mem_cgroup_is_root(mem
))
1430 unsigned long flags
= 0;
1432 if (consume_stock(mem
))
1435 ret
= res_counter_charge(&mem
->res
, csize
, &fail_res
);
1437 if (!do_swap_account
)
1439 ret
= res_counter_charge(&mem
->memsw
, csize
, &fail_res
);
1442 /* mem+swap counter fails */
1443 res_counter_uncharge(&mem
->res
, csize
);
1444 flags
|= MEM_CGROUP_RECLAIM_NOSWAP
;
1445 mem_over_limit
= mem_cgroup_from_res_counter(fail_res
,
1448 /* mem counter fails */
1449 mem_over_limit
= mem_cgroup_from_res_counter(fail_res
,
1452 /* reduce request size and retry */
1453 if (csize
> PAGE_SIZE
) {
1457 if (!(gfp_mask
& __GFP_WAIT
))
1460 ret
= mem_cgroup_hierarchical_reclaim(mem_over_limit
, NULL
,
1466 * try_to_free_mem_cgroup_pages() might not give us a full
1467 * picture of reclaim. Some pages are reclaimed and might be
1468 * moved to swap cache or just unmapped from the cgroup.
1469 * Check the limit again to see if the reclaim reduced the
1470 * current usage of the cgroup before giving up
1473 if (mem_cgroup_check_under_limit(mem_over_limit
))
1476 if (!nr_retries
--) {
1478 mutex_lock(&memcg_tasklist
);
1479 mem_cgroup_out_of_memory(mem_over_limit
, gfp_mask
);
1480 mutex_unlock(&memcg_tasklist
);
1481 record_last_oom(mem_over_limit
);
1486 if (csize
> PAGE_SIZE
)
1487 refill_stock(mem
, csize
- PAGE_SIZE
);
1490 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
1491 * if they exceeds softlimit.
1493 if (mem_cgroup_soft_limit_check(mem
))
1494 mem_cgroup_update_tree(mem
, page
);
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller must check css_is_removed() or something similar
 * if it is a concern. (Dropping a refcnt from swap can be called against a removed
1508 static struct mem_cgroup
*mem_cgroup_lookup(unsigned short id
)
1510 struct cgroup_subsys_state
*css
;
1512 /* ID 0 is unused ID */
1515 css
= css_lookup(&mem_cgroup_subsys
, id
);
1518 return container_of(css
, struct mem_cgroup
, css
);
1521 static struct mem_cgroup
*try_get_mem_cgroup_from_swapcache(struct page
*page
)
1523 struct mem_cgroup
*mem
;
1524 struct page_cgroup
*pc
;
1528 VM_BUG_ON(!PageLocked(page
));
1530 if (!PageSwapCache(page
))
1533 pc
= lookup_page_cgroup(page
);
1534 lock_page_cgroup(pc
);
1535 if (PageCgroupUsed(pc
)) {
1536 mem
= pc
->mem_cgroup
;
1537 if (mem
&& !css_tryget(&mem
->css
))
1540 ent
.val
= page_private(page
);
1541 id
= lookup_swap_cgroup(ent
);
1543 mem
= mem_cgroup_lookup(id
);
1544 if (mem
&& !css_tryget(&mem
->css
))
1548 unlock_page_cgroup(pc
);
 * Commit a charge obtained by __mem_cgroup_try_charge() and make the page_cgroup
 * enter the USED state. If it is already USED, uncharge and return.
1557 static void __mem_cgroup_commit_charge(struct mem_cgroup
*mem
,
1558 struct page_cgroup
*pc
,
1559 enum charge_type ctype
)
1561 /* try_charge() can return NULL to *memcg, taking care of it. */
1565 lock_page_cgroup(pc
);
1566 if (unlikely(PageCgroupUsed(pc
))) {
1567 unlock_page_cgroup(pc
);
1568 if (!mem_cgroup_is_root(mem
)) {
1569 res_counter_uncharge(&mem
->res
, PAGE_SIZE
);
1570 if (do_swap_account
)
1571 res_counter_uncharge(&mem
->memsw
, PAGE_SIZE
);
1577 pc
->mem_cgroup
= mem
;
1579 * We access a page_cgroup asynchronously without lock_page_cgroup().
1580 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1581 * is accessed after testing USED bit. To make pc->mem_cgroup visible
1582 * before USED bit, we need memory barrier here.
1583 * See mem_cgroup_add_lru_list(), etc.
1587 case MEM_CGROUP_CHARGE_TYPE_CACHE
:
1588 case MEM_CGROUP_CHARGE_TYPE_SHMEM
:
1589 SetPageCgroupCache(pc
);
1590 SetPageCgroupUsed(pc
);
1592 case MEM_CGROUP_CHARGE_TYPE_MAPPED
:
1593 ClearPageCgroupCache(pc
);
1594 SetPageCgroupUsed(pc
);
1600 mem_cgroup_charge_statistics(mem
, pc
, true);
1602 unlock_page_cgroup(pc
);
1606 * mem_cgroup_move_account - move account of the page
1607 * @pc: page_cgroup of the page.
1608 * @from: mem_cgroup which the page is moved from.
1609 * @to: mem_cgroup which the page is moved to. @from != @to.
 * The caller must confirm the following.
 * - page is not on LRU (isolate_page() is useful.)
 *
 * Returns 0 on success,
 * returns -EBUSY when the lock is busy or "pc" is unstable.
1617 * This function does "uncharge" from old cgroup but doesn't do "charge" to
1618 * new cgroup. It should be done by a caller.
1621 static int mem_cgroup_move_account(struct page_cgroup
*pc
,
1622 struct mem_cgroup
*from
, struct mem_cgroup
*to
)
1624 struct mem_cgroup_per_zone
*from_mz
, *to_mz
;
1629 struct mem_cgroup_stat
*stat
;
1630 struct mem_cgroup_stat_cpu
*cpustat
;
1632 VM_BUG_ON(from
== to
);
1633 VM_BUG_ON(PageLRU(pc
->page
));
1635 nid
= page_cgroup_nid(pc
);
1636 zid
= page_cgroup_zid(pc
);
1637 from_mz
= mem_cgroup_zoneinfo(from
, nid
, zid
);
1638 to_mz
= mem_cgroup_zoneinfo(to
, nid
, zid
);
1640 if (!trylock_page_cgroup(pc
))
1643 if (!PageCgroupUsed(pc
))
1646 if (pc
->mem_cgroup
!= from
)
1649 if (!mem_cgroup_is_root(from
))
1650 res_counter_uncharge(&from
->res
, PAGE_SIZE
);
1651 mem_cgroup_charge_statistics(from
, pc
, false);
1654 if (page_mapped(page
) && !PageAnon(page
)) {
1655 cpu
= smp_processor_id();
1656 /* Update mapped_file data for mem_cgroup "from" */
1658 cpustat
= &stat
->cpustat
[cpu
];
1659 __mem_cgroup_stat_add_safe(cpustat
, MEM_CGROUP_STAT_FILE_MAPPED
,
1662 /* Update mapped_file data for mem_cgroup "to" */
1664 cpustat
= &stat
->cpustat
[cpu
];
1665 __mem_cgroup_stat_add_safe(cpustat
, MEM_CGROUP_STAT_FILE_MAPPED
,
1669 if (do_swap_account
&& !mem_cgroup_is_root(from
))
1670 res_counter_uncharge(&from
->memsw
, PAGE_SIZE
);
1671 css_put(&from
->css
);
1674 pc
->mem_cgroup
= to
;
1675 mem_cgroup_charge_statistics(to
, pc
, true);
1678 unlock_page_cgroup(pc
);
	 * We charge against "to", which may not have any tasks. Then "to"
	 * can be under rmdir(). But in the current implementation, the caller of
	 * this function is just force_empty() and it's guaranteed that
	 * "to" is never removed. So, we don't check rmdir status here.
1689 * move charges to its parent.
1692 static int mem_cgroup_move_parent(struct page_cgroup
*pc
,
1693 struct mem_cgroup
*child
,
1696 struct page
*page
= pc
->page
;
1697 struct cgroup
*cg
= child
->css
.cgroup
;
1698 struct cgroup
*pcg
= cg
->parent
;
1699 struct mem_cgroup
*parent
;
1707 parent
= mem_cgroup_from_cont(pcg
);
1710 ret
= __mem_cgroup_try_charge(NULL
, gfp_mask
, &parent
, false, page
);
1714 if (!get_page_unless_zero(page
)) {
1719 ret
= isolate_lru_page(page
);
1724 ret
= mem_cgroup_move_account(pc
, child
, parent
);
1726 putback_lru_page(page
);
1729 /* drop extra refcnt by try_charge() */
1730 css_put(&parent
->css
);
1737 /* drop extra refcnt by try_charge() */
1738 css_put(&parent
->css
);
1739 /* uncharge if move fails */
1740 if (!mem_cgroup_is_root(parent
)) {
1741 res_counter_uncharge(&parent
->res
, PAGE_SIZE
);
1742 if (do_swap_account
)
1743 res_counter_uncharge(&parent
->memsw
, PAGE_SIZE
);
1749 * Charge the memory controller for page usage.
1751 * 0 if the charge was successful
1752 * < 0 if the cgroup is over its limit
1754 static int mem_cgroup_charge_common(struct page
*page
, struct mm_struct
*mm
,
1755 gfp_t gfp_mask
, enum charge_type ctype
,
1756 struct mem_cgroup
*memcg
)
1758 struct mem_cgroup
*mem
;
1759 struct page_cgroup
*pc
;
1762 pc
= lookup_page_cgroup(page
);
1763 /* can happen at boot */
1769 ret
= __mem_cgroup_try_charge(mm
, gfp_mask
, &mem
, true, page
);
1773 __mem_cgroup_commit_charge(mem
, pc
, ctype
);
1777 int mem_cgroup_newpage_charge(struct page
*page
,
1778 struct mm_struct
*mm
, gfp_t gfp_mask
)
1780 if (mem_cgroup_disabled())
1782 if (PageCompound(page
))
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it by the PageAnon() check. A newly-mapped-anon's page->mapping
1791 if (page_mapped(page
) || (page
->mapping
&& !PageAnon(page
)))
1795 return mem_cgroup_charge_common(page
, mm
, gfp_mask
,
1796 MEM_CGROUP_CHARGE_TYPE_MAPPED
, NULL
);
1800 __mem_cgroup_commit_charge_swapin(struct page
*page
, struct mem_cgroup
*ptr
,
1801 enum charge_type ctype
);
1803 int mem_cgroup_cache_charge(struct page
*page
, struct mm_struct
*mm
,
1806 struct mem_cgroup
*mem
= NULL
;
1809 if (mem_cgroup_disabled())
1811 if (PageCompound(page
))
1814 * Corner case handling. This is called from add_to_page_cache()
1815 * in usual. But some FS (shmem) precharges this page before calling it
1816 * and call add_to_page_cache() with GFP_NOWAIT.
1818 * For GFP_NOWAIT case, the page may be pre-charged before calling
1819 * add_to_page_cache(). (See shmem.c) check it here and avoid to call
1820 * charge twice. (It works but has to pay a bit larger cost.)
1821 * And when the page is SwapCache, it should take swap information
1822 * into account. This is under lock_page() now.
1824 if (!(gfp_mask
& __GFP_WAIT
)) {
1825 struct page_cgroup
*pc
;
1828 pc
= lookup_page_cgroup(page
);
1831 lock_page_cgroup(pc
);
1832 if (PageCgroupUsed(pc
)) {
1833 unlock_page_cgroup(pc
);
1836 unlock_page_cgroup(pc
);
1839 if (unlikely(!mm
&& !mem
))
1842 if (page_is_file_cache(page
))
1843 return mem_cgroup_charge_common(page
, mm
, gfp_mask
,
1844 MEM_CGROUP_CHARGE_TYPE_CACHE
, NULL
);
1847 if (PageSwapCache(page
)) {
1848 ret
= mem_cgroup_try_charge_swapin(mm
, page
, gfp_mask
, &mem
);
1850 __mem_cgroup_commit_charge_swapin(page
, mem
,
1851 MEM_CGROUP_CHARGE_TYPE_SHMEM
);
1853 ret
= mem_cgroup_charge_common(page
, mm
, gfp_mask
,
1854 MEM_CGROUP_CHARGE_TYPE_SHMEM
, mem
);
1860 * While swap-in, try_charge -> commit or cancel, the page is locked.
1861 * And when try_charge() successfully returns, one refcnt to memcg without
1862 * struct page_cgroup is acquired. This refcnt will be consumed by
1863 * "commit()" or removed by "cancel()"
1865 int mem_cgroup_try_charge_swapin(struct mm_struct
*mm
,
1867 gfp_t mask
, struct mem_cgroup
**ptr
)
1869 struct mem_cgroup
*mem
;
1872 if (mem_cgroup_disabled())
1875 if (!do_swap_account
)
1878 * A racing thread's fault, or swapoff, may have already updated
1879 * the pte, and even removed page from swap cache: in those cases
1880 * do_swap_page()'s pte_same() test will fail; but there's also a
1881 * KSM case which does need to charge the page.
1883 if (!PageSwapCache(page
))
1885 mem
= try_get_mem_cgroup_from_swapcache(page
);
1889 ret
= __mem_cgroup_try_charge(NULL
, mask
, ptr
, true, page
);
1890 /* drop extra refcnt from tryget */
1896 return __mem_cgroup_try_charge(mm
, mask
, ptr
, true, page
);
1900 __mem_cgroup_commit_charge_swapin(struct page
*page
, struct mem_cgroup
*ptr
,
1901 enum charge_type ctype
)
1903 struct page_cgroup
*pc
;
1905 if (mem_cgroup_disabled())
1909 cgroup_exclude_rmdir(&ptr
->css
);
1910 pc
= lookup_page_cgroup(page
);
1911 mem_cgroup_lru_del_before_commit_swapcache(page
);
1912 __mem_cgroup_commit_charge(ptr
, pc
, ctype
);
1913 mem_cgroup_lru_add_after_commit_swapcache(page
);
1915 * Now swap is on-memory. This means this page may be
1916 * counted both as mem and swap....double count.
1917 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1918 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
1919 * may call delete_from_swap_cache() before reach here.
1921 if (do_swap_account
&& PageSwapCache(page
)) {
1922 swp_entry_t ent
= {.val
= page_private(page
)};
1924 struct mem_cgroup
*memcg
;
1926 id
= swap_cgroup_record(ent
, 0);
1928 memcg
= mem_cgroup_lookup(id
);
1931 * This recorded memcg can be obsolete one. So, avoid
1932 * calling css_tryget
1934 if (!mem_cgroup_is_root(memcg
))
1935 res_counter_uncharge(&memcg
->memsw
, PAGE_SIZE
);
1936 mem_cgroup_swap_statistics(memcg
, false);
1937 mem_cgroup_put(memcg
);
1942 * At swapin, we may charge account against cgroup which has no tasks.
1943 * So, rmdir()->pre_destroy() can be called while we do this charge.
1944 * In that case, we need to call pre_destroy() again. check it here.
1946 cgroup_release_and_wakeup_rmdir(&ptr
->css
);
1949 void mem_cgroup_commit_charge_swapin(struct page
*page
, struct mem_cgroup
*ptr
)
1951 __mem_cgroup_commit_charge_swapin(page
, ptr
,
1952 MEM_CGROUP_CHARGE_TYPE_MAPPED
);
1955 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup
*mem
)
1957 if (mem_cgroup_disabled())
1961 if (!mem_cgroup_is_root(mem
)) {
1962 res_counter_uncharge(&mem
->res
, PAGE_SIZE
);
1963 if (do_swap_account
)
1964 res_counter_uncharge(&mem
->memsw
, PAGE_SIZE
);
1970 __do_uncharge(struct mem_cgroup
*mem
, const enum charge_type ctype
)
1972 struct memcg_batch_info
*batch
= NULL
;
1973 bool uncharge_memsw
= true;
1974 /* If swapout, usage of swap doesn't decrease */
1975 if (!do_swap_account
|| ctype
== MEM_CGROUP_CHARGE_TYPE_SWAPOUT
)
1976 uncharge_memsw
= false;
	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
	 * In those cases, all pages freed continuously can be expected to be in
	 * the same cgroup and we have a chance to coalesce uncharges.
	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
	 * because we want to do uncharge as soon as possible.
1984 if (!current
->memcg_batch
.do_batch
|| test_thread_flag(TIF_MEMDIE
))
1985 goto direct_uncharge
;
1987 batch
= ¤t
->memcg_batch
;
	 * Usually, we do css_get() when we remember the memcg pointer.
	 * But in this case, we keep res->usage until the end of a series of
	 * uncharges. Then, it's ok to ignore memcg's refcnt.
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges into one uncharge of the res_counter.
	 * If not, we uncharge the res_counter one by one.
2000 if (batch
->memcg
!= mem
)
2001 goto direct_uncharge
;
2002 /* remember freed charge and uncharge it later */
2003 batch
->bytes
+= PAGE_SIZE
;
2005 batch
->memsw_bytes
+= PAGE_SIZE
;
2008 res_counter_uncharge(&mem
->res
, PAGE_SIZE
);
2010 res_counter_uncharge(&mem
->memsw
, PAGE_SIZE
);
2015 * uncharge if !page_mapped(page)
2017 static struct mem_cgroup
*
2018 __mem_cgroup_uncharge_common(struct page
*page
, enum charge_type ctype
)
2020 struct page_cgroup
*pc
;
2021 struct mem_cgroup
*mem
= NULL
;
2022 struct mem_cgroup_per_zone
*mz
;
2024 if (mem_cgroup_disabled())
2027 if (PageSwapCache(page
))
2031 * Check if our page_cgroup is valid
2033 pc
= lookup_page_cgroup(page
);
2034 if (unlikely(!pc
|| !PageCgroupUsed(pc
)))
2037 lock_page_cgroup(pc
);
2039 mem
= pc
->mem_cgroup
;
2041 if (!PageCgroupUsed(pc
))
2045 case MEM_CGROUP_CHARGE_TYPE_MAPPED
:
2046 case MEM_CGROUP_CHARGE_TYPE_DROP
:
2047 if (page_mapped(page
))
2050 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT
:
2051 if (!PageAnon(page
)) { /* Shared memory */
2052 if (page
->mapping
&& !page_is_file_cache(page
))
2054 } else if (page_mapped(page
)) /* Anon */
2061 if (!mem_cgroup_is_root(mem
))
2062 __do_uncharge(mem
, ctype
);
2063 if (ctype
== MEM_CGROUP_CHARGE_TYPE_SWAPOUT
)
2064 mem_cgroup_swap_statistics(mem
, true);
2065 mem_cgroup_charge_statistics(mem
, pc
, false);
2067 ClearPageCgroupUsed(pc
);
2069 * pc->mem_cgroup is not cleared here. It will be accessed when it's
2070 * freed from LRU. This is safe because uncharged page is expected not
2071 * to be reused (freed soon). Exception is SwapCache, it's handled by
2072 * special functions.
2075 mz
= page_cgroup_zoneinfo(pc
);
2076 unlock_page_cgroup(pc
);
2078 if (mem_cgroup_soft_limit_check(mem
))
2079 mem_cgroup_update_tree(mem
, page
);
2080 /* at swapout, this memcg will be accessed to record to swap */
2081 if (ctype
!= MEM_CGROUP_CHARGE_TYPE_SWAPOUT
)
2087 unlock_page_cgroup(pc
);
2091 void mem_cgroup_uncharge_page(struct page
*page
)
2094 if (page_mapped(page
))
2096 if (page
->mapping
&& !PageAnon(page
))
2098 __mem_cgroup_uncharge_common(page
, MEM_CGROUP_CHARGE_TYPE_MAPPED
);
2101 void mem_cgroup_uncharge_cache_page(struct page
*page
)
2103 VM_BUG_ON(page_mapped(page
));
2104 VM_BUG_ON(page
->mapping
);
2105 __mem_cgroup_uncharge_common(page
, MEM_CGROUP_CHARGE_TYPE_CACHE
);
/*
 * batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect pages
 * are in the same memcg. All these calls themselves limit the number of
 * pages freed at once, then uncharge_start/end() is called properly.
 * This may be called plural (2 or more) times in a context.
 */
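/*
 * Typical call pattern (illustrative):
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being truncated/unmapped
 *		mem_cgroup_uncharge_page(page);	   accumulates in current->memcg_batch
 *	mem_cgroup_uncharge_end();		   one res_counter_uncharge of the sum
 */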
2116 void mem_cgroup_uncharge_start(void)
2118 current
->memcg_batch
.do_batch
++;
2119 /* We can do nest. */
2120 if (current
->memcg_batch
.do_batch
== 1) {
2121 current
->memcg_batch
.memcg
= NULL
;
2122 current
->memcg_batch
.bytes
= 0;
2123 current
->memcg_batch
.memsw_bytes
= 0;
2127 void mem_cgroup_uncharge_end(void)
2129 struct memcg_batch_info
*batch
= ¤t
->memcg_batch
;
2131 if (!batch
->do_batch
)
2135 if (batch
->do_batch
) /* If stacked, do nothing. */
2141 * This "batch->memcg" is valid without any css_get/put etc...
2142 * bacause we hide charges behind us.
2145 res_counter_uncharge(&batch
->memcg
->res
, batch
->bytes
);
2146 if (batch
->memsw_bytes
)
2147 res_counter_uncharge(&batch
->memcg
->memsw
, batch
->memsw_bytes
);
2148 /* forget this pointer (for sanity check) */
2149 batch
->memcg
= NULL
;
 * Called after __delete_from_swap_cache(); drops the "page" account.
 * The memcg information is recorded in the swap_cgroup of "ent".
2158 mem_cgroup_uncharge_swapcache(struct page
*page
, swp_entry_t ent
, bool swapout
)
2160 struct mem_cgroup
*memcg
;
2161 int ctype
= MEM_CGROUP_CHARGE_TYPE_SWAPOUT
;
2163 if (!swapout
) /* this was a swap cache but the swap is unused ! */
2164 ctype
= MEM_CGROUP_CHARGE_TYPE_DROP
;
2166 memcg
= __mem_cgroup_uncharge_common(page
, ctype
);
2168 /* record memcg information */
2169 if (do_swap_account
&& swapout
&& memcg
) {
2170 swap_cgroup_record(ent
, css_id(&memcg
->css
));
2171 mem_cgroup_get(memcg
);
2173 if (swapout
&& memcg
)
2174 css_put(&memcg
->css
);
2178 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2180 * called from swap_entry_free(). remove record in swap_cgroup and
2181 * uncharge "memsw" account.
2183 void mem_cgroup_uncharge_swap(swp_entry_t ent
)
2185 struct mem_cgroup
*memcg
;
2188 if (!do_swap_account
)
2191 id
= swap_cgroup_record(ent
, 0);
2193 memcg
= mem_cgroup_lookup(id
);
2196 * We uncharge this because swap is freed.
2197 * This memcg can be obsolete one. We avoid calling css_tryget
2199 if (!mem_cgroup_is_root(memcg
))
2200 res_counter_uncharge(&memcg
->memsw
, PAGE_SIZE
);
2201 mem_cgroup_swap_statistics(memcg
, false);
2202 mem_cgroup_put(memcg
);
2209 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2212 int mem_cgroup_prepare_migration(struct page
*page
, struct mem_cgroup
**ptr
)
2214 struct page_cgroup
*pc
;
2215 struct mem_cgroup
*mem
= NULL
;
2218 if (mem_cgroup_disabled())
2221 pc
= lookup_page_cgroup(page
);
2222 lock_page_cgroup(pc
);
2223 if (PageCgroupUsed(pc
)) {
2224 mem
= pc
->mem_cgroup
;
2227 unlock_page_cgroup(pc
);
2230 ret
= __mem_cgroup_try_charge(NULL
, GFP_KERNEL
, &mem
, false,
2238 /* remove redundant charge if migration failed*/
2239 void mem_cgroup_end_migration(struct mem_cgroup
*mem
,
2240 struct page
*oldpage
, struct page
*newpage
)
2242 struct page
*target
, *unused
;
2243 struct page_cgroup
*pc
;
2244 enum charge_type ctype
;
2248 cgroup_exclude_rmdir(&mem
->css
);
2249 /* at migration success, oldpage->mapping is NULL. */
2250 if (oldpage
->mapping
) {
2258 if (PageAnon(target
))
2259 ctype
= MEM_CGROUP_CHARGE_TYPE_MAPPED
;
2260 else if (page_is_file_cache(target
))
2261 ctype
= MEM_CGROUP_CHARGE_TYPE_CACHE
;
2263 ctype
= MEM_CGROUP_CHARGE_TYPE_SHMEM
;
2265 /* unused page is not on radix-tree now. */
2267 __mem_cgroup_uncharge_common(unused
, ctype
);
2269 pc
= lookup_page_cgroup(target
);
2271 * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup.
2272 * So, double-counting is effectively avoided.
2274 __mem_cgroup_commit_charge(mem
, pc
, ctype
);
2277 * Both of oldpage and newpage are still under lock_page().
2278 * Then, we don't have to care about race in radix-tree.
2279 * But we have to be careful that this page is unmapped or not.
2281 * There is a case for !page_mapped(). At the start of
2282 * migration, oldpage was mapped. But now, it's zapped.
2283 * But we know *target* page is not freed/reused under us.
2284 * mem_cgroup_uncharge_page() does all necessary checks.
2286 if (ctype
== MEM_CGROUP_CHARGE_TYPE_MAPPED
)
2287 mem_cgroup_uncharge_page(target
);
2289 * At migration, we may charge account against cgroup which has no tasks
2290 * So, rmdir()->pre_destroy() can be called while we do this charge.
2291 * In that case, we need to call pre_destroy() again. check it here.
2293 cgroup_release_and_wakeup_rmdir(&mem
->css
);
2297 * A call to try to shrink memory usage on charge failure at shmem's swapin.
2298 * Calling hierarchical_reclaim is not enough because we should update
2299 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2300 * Moreover considering hierarchy, we should reclaim from the mem_over_limit,
2301 * not from the memcg which this page would be charged to.
2302 * try_charge_swapin does all of these works properly.
2304 int mem_cgroup_shmem_charge_fallback(struct page
*page
,
2305 struct mm_struct
*mm
,
2308 struct mem_cgroup
*mem
= NULL
;
2311 if (mem_cgroup_disabled())
2314 ret
= mem_cgroup_try_charge_swapin(mm
, page
, gfp_mask
, &mem
);
2316 mem_cgroup_cancel_charge_swapin(mem
); /* it does !mem check */
2321 static DEFINE_MUTEX(set_limit_mutex
);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                                unsigned long long val)
{
        int retry_count;
        int progress;
        u64 memswlimit;
        int ret = 0;
        int children = mem_cgroup_count_children(memcg);
        u64 curusage, oldusage;

        /*
         * For keeping hierarchical_reclaim simple, how long we should retry
         * depends on the caller. We set our retry-count to be a function
         * of the # of children which we should visit in this loop.
         */
        retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;

        oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);

        while (retry_count) {
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                /*
                 * Rather than hide all in some function, I do this in
                 * open coded manner. You see what this really does.
                 * We have to guarantee mem->res.limit < mem->memsw.limit.
                 */
                mutex_lock(&set_limit_mutex);
                memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
                if (memswlimit < val) {
                        ret = -EINVAL;
                        mutex_unlock(&set_limit_mutex);
                        break;
                }
                ret = res_counter_set_limit(&memcg->res, val);
                if (!ret) {
                        if (memswlimit == val)
                                memcg->memsw_is_minimum = true;
                        else
                                memcg->memsw_is_minimum = false;
                }
                mutex_unlock(&set_limit_mutex);

                if (!ret)
                        break;

                progress = mem_cgroup_hierarchical_reclaim(memcg, NULL,
                                                GFP_KERNEL,
                                                MEM_CGROUP_RECLAIM_SHRINK);
                curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
                        retry_count--;
                else
                        oldusage = curusage;
        }

        return ret;
}
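
/*
 * Illustrative example (editor's note, not part of the original flow): the
 * retry budget above scales with the hierarchy, so a memcg with 4 children
 * gets 4 * MEM_CGROUP_RECLAIM_RETRIES reclaim passes before a shrinking
 * write such as
 *        echo 256M > memory.limit_in_bytes
 * gives up and returns the last res_counter error (typically -EBUSY when
 * usage still exceeds the requested limit).
 */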
static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                                        unsigned long long val)
{
        int retry_count;
        u64 memlimit, oldusage, curusage;
        int children = mem_cgroup_count_children(memcg);
        int ret = -EBUSY;

        /* see mem_cgroup_resize_res_limit */
        retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
        oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
        while (retry_count) {
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                /*
                 * Rather than hide all in some function, I do this in
                 * open coded manner. You see what this really does.
                 * We have to guarantee mem->res.limit < mem->memsw.limit.
                 */
                mutex_lock(&set_limit_mutex);
                memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
                if (memlimit > val) {
                        ret = -EINVAL;
                        mutex_unlock(&set_limit_mutex);
                        break;
                }
                ret = res_counter_set_limit(&memcg->memsw, val);
                if (!ret) {
                        if (memlimit == val)
                                memcg->memsw_is_minimum = true;
                        else
                                memcg->memsw_is_minimum = false;
                }
                mutex_unlock(&set_limit_mutex);

                if (!ret)
                        break;

                mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
                                                MEM_CGROUP_RECLAIM_NOSWAP |
                                                MEM_CGROUP_RECLAIM_SHRINK);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
                        retry_count--;
                else
                        oldusage = curusage;
        }
        return ret;
}
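
/*
 * Illustrative note: together with mem_cgroup_resize_limit() above, these
 * two writers keep the invariant
 *        memory.limit_in_bytes <= memory.memsw.limit_in_bytes,
 * e.g. trying to lower the memsw limit below the plain memory limit is
 * rejected here with -EINVAL (the "memlimit > val" check).
 */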
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask, int nid,
                                            int zid)
{
        unsigned long nr_reclaimed = 0;
        struct mem_cgroup_per_zone *mz, *next_mz = NULL;
        unsigned long reclaimed;
        int loop = 0;
        struct mem_cgroup_tree_per_zone *mctz;
        unsigned long long excess;

        if (order > 0)
                return 0;

        mctz = soft_limit_tree_node_zone(nid, zid);
        /*
         * This loop can run a while, especially if mem_cgroups continuously
         * keep exceeding their soft limit and putting the system under
         * pressure
         */
        do {
                if (next_mz)
                        mz = next_mz;
                else
                        mz = mem_cgroup_largest_soft_limit_node(mctz);
                if (!mz)
                        break;

                reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
                                                gfp_mask,
                                                MEM_CGROUP_RECLAIM_SOFT);
                nr_reclaimed += reclaimed;
                spin_lock(&mctz->lock);

                /*
                 * If we failed to reclaim anything from this memory cgroup
                 * it is time to move on to the next cgroup
                 */
                next_mz = NULL;
                if (!reclaimed) {
                        do {
                                /*
                                 * Loop until we find yet another one.
                                 *
                                 * By the time we get the soft_limit lock
                                 * again, someone might have added the
                                 * group back on the RB tree. Iterate to
                                 * make sure we get a different mem.
                                 * mem_cgroup_largest_soft_limit_node returns
                                 * NULL if no other cgroup is present on
                                 * the tree.
                                 */
                                next_mz =
                                __mem_cgroup_largest_soft_limit_node(mctz);
                                if (next_mz == mz) {
                                        css_put(&next_mz->mem->css);
                                        next_mz = NULL;
                                } else /* next_mz == NULL or other memcg */
                                        break;
                        } while (1);
                }
                __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
                excess = res_counter_soft_limit_excess(&mz->mem->res);
                /*
                 * One school of thought says that we should not add
                 * back the node to the tree if reclaim returns 0.
                 * But our reclaim could return 0, simply because due
                 * to priority we are exposing a smaller subset of
                 * memory to reclaim from. Consider this as a longer
                 * term TODO.
                 */
                /* If excess == 0, no tree ops */
                __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
                spin_unlock(&mctz->lock);
                css_put(&mz->mem->css);
                loop++;
                /*
                 * Could not reclaim anything and there are no more
                 * mem cgroups to try or we seem to be looping without
                 * reclaiming anything.
                 */
                if (!nr_reclaimed &&
                        (next_mz == NULL ||
                        loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
                        break;
        } while (!nr_reclaimed);
        if (next_mz)
                css_put(&next_mz->mem->css);
        return nr_reclaimed;
}
/*
 * This routine traverses page_cgroups in the given list and drops them all.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                                int node, int zid, enum lru_list lru)
{
        struct zone *zone;
        struct mem_cgroup_per_zone *mz;
        struct page_cgroup *pc, *busy;
        unsigned long flags, loop;
        struct list_head *list;
        int ret = 0;

        zone = &NODE_DATA(node)->node_zones[zid];
        mz = mem_cgroup_zoneinfo(mem, node, zid);
        list = &mz->lists[lru];

        loop = MEM_CGROUP_ZSTAT(mz, lru);
        /* give some margin against EBUSY etc...*/
        loop += 256;
        busy = NULL;
        while (loop--) {
                ret = 0;
                spin_lock_irqsave(&zone->lru_lock, flags);
                if (list_empty(list)) {
                        spin_unlock_irqrestore(&zone->lru_lock, flags);
                        break;
                }
                pc = list_entry(list->prev, struct page_cgroup, lru);
                if (busy == pc) {
                        list_move(&pc->lru, list);
                        busy = NULL;
                        spin_unlock_irqrestore(&zone->lru_lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(&zone->lru_lock, flags);

                ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
                if (ret == -ENOMEM)
                        break;

                if (ret == -EBUSY || ret == -EINVAL) {
                        /* found lock contention or "pc" is obsolete. */
                        busy = pc;
                        cond_resched();
                } else
                        busy = NULL;
        }

        if (!ret && !list_empty(list))
                return -EBUSY;
        return ret;
}
/*
 * make mem_cgroup's charge to be 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
        int ret;
        int node, zid, shrink;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct cgroup *cgrp = mem->css.cgroup;

        css_get(&mem->css);

        shrink = 0;
        /* should free all ? */
        if (free_all)
                goto try_to_free;
move_account:
        while (mem->res.usage > 0) {
                ret = -EBUSY;
                if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
                        goto out;
                ret = -EINTR;
                if (signal_pending(current))
                        goto out;
                /* This is for making all *used* pages to be on LRU. */
                lru_add_drain_all();
                drain_all_stock_sync();
                ret = 0;
                for_each_node_state(node, N_HIGH_MEMORY) {
                        for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
                                enum lru_list l;
                                for_each_lru(l) {
                                        ret = mem_cgroup_force_empty_list(mem,
                                                        node, zid, l);
                                        if (ret)
                                                break;
                                }
                        }
                        if (ret)
                                break;
                }
                /* it seems parent cgroup doesn't have enough mem */
                if (ret == -ENOMEM)
                        goto try_to_free;
                cond_resched();
        }
        ret = 0;
out:
        css_put(&mem->css);
        return ret;

try_to_free:
        /* returns EBUSY if there is a task or if we come here twice. */
        if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
                ret = -EBUSY;
                goto out;
        }
        /* we call try-to-free pages to make this cgroup empty */
        lru_add_drain_all();
        /* try to free all pages in this cgroup */
        shrink = 1;
        while (nr_retries && mem->res.usage > 0) {
                int progress;

                if (signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
                progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
                                                false, get_swappiness(mem));
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
                        congestion_wait(BLK_RW_ASYNC, HZ/10);
                }
        }
        lru_add_drain();
        /* try move_account...there may be some *locked* pages. */
        goto move_account;
}
int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
        return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}
static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
        return mem_cgroup_from_cont(cont)->use_hierarchy;
}
static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
                                        u64 val)
{
        int retval = 0;
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
        struct cgroup *parent = cont->parent;
        struct mem_cgroup *parent_mem = NULL;

        if (parent)
                parent_mem = mem_cgroup_from_cont(parent);

        cgroup_lock();
        /*
         * If parent's use_hierarchy is set, we can't make any modifications
         * in the child subtrees. If it is unset, then the change can
         * occur, provided the current cgroup has no children.
         *
         * For the root cgroup, parent_mem is NULL, we allow value to be
         * set if there are no children.
         */
        if ((!parent_mem || !parent_mem->use_hierarchy) &&
                                (val == 1 || val == 0)) {
                if (list_empty(&cont->children))
                        mem->use_hierarchy = val;
                else
                        retval = -EBUSY;
        } else
                retval = -EINVAL;
        cgroup_unlock();

        return retval;
}
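
/*
 * Usage example (illustrative):
 *        echo 1 > memory.use_hierarchy
 * succeeds only while this cgroup has no children and no parent already
 * forces hierarchical accounting; otherwise -EBUSY or -EINVAL is returned,
 * per the checks above.
 */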
struct mem_cgroup_idx_data {
        s64 val;
        enum mem_cgroup_stat_index idx;
};

static int
mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
{
        struct mem_cgroup_idx_data *d = data;
        d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
        return 0;
}

static void
mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
                                enum mem_cgroup_stat_index idx, s64 *val)
{
        struct mem_cgroup_idx_data d;
        d.idx = idx;
        d.val = 0;
        mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
        *val = d.val;
}
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
        u64 idx_val, val;
        int type, name;

        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
        switch (type) {
        case _MEM:
                if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
                        mem_cgroup_get_recursive_idx_stat(mem,
                                MEM_CGROUP_STAT_CACHE, &idx_val);
                        val = idx_val;
                        mem_cgroup_get_recursive_idx_stat(mem,
                                MEM_CGROUP_STAT_RSS, &idx_val);
                        val += idx_val;
                        val <<= PAGE_SHIFT;
                } else
                        val = res_counter_read_u64(&mem->res, name);
                break;
        case _MEMSWAP:
                if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
                        mem_cgroup_get_recursive_idx_stat(mem,
                                MEM_CGROUP_STAT_CACHE, &idx_val);
                        val = idx_val;
                        mem_cgroup_get_recursive_idx_stat(mem,
                                MEM_CGROUP_STAT_RSS, &idx_val);
                        val += idx_val;
                        mem_cgroup_get_recursive_idx_stat(mem,
                                MEM_CGROUP_STAT_SWAPOUT, &idx_val);
                        val += idx_val;
                        val <<= PAGE_SHIFT;
                } else
                        val = res_counter_read_u64(&mem->memsw, name);
                break;
        default:
                BUG();
                break;
        }
        return val;
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                            const char *buffer)
{
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
        int type, name;
        unsigned long long val;
        int ret;

        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
        switch (name) {
        case RES_LIMIT:
                if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
                        ret = -EINVAL;
                        break;
                }
                /* This function does all necessary parse...reuse it */
                ret = res_counter_memparse_write_strategy(buffer, &val);
                if (ret)
                        break;
                if (type == _MEM)
                        ret = mem_cgroup_resize_limit(memcg, val);
                else
                        ret = mem_cgroup_resize_memsw_limit(memcg, val);
                break;
        case RES_SOFT_LIMIT:
                ret = res_counter_memparse_write_strategy(buffer, &val);
                if (ret)
                        break;
                /*
                 * For memsw, soft limits are hard to implement in terms
                 * of semantics, for now, we support soft limits for
                 * control without swap
                 */
                if (type == _MEM)
                        ret = res_counter_set_soft_limit(&memcg->res, val);
                else
                        ret = -EINVAL;
                break;
        default:
                ret = -EINVAL; /* should be BUG() ? */
                break;
        }
        return ret;
}
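
/*
 * Illustrative example: res_counter_memparse_write_strategy() accepts
 * memparse()-style suffixes, so writes such as
 *        echo 4M > memory.limit_in_bytes
 *        echo -1 > memory.limit_in_bytes   (commonly used to mean "unlimited")
 * land here with 'buffer' holding the raw user string.
 */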
static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
                unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
        struct cgroup *cgroup;
        unsigned long long min_limit, min_memsw_limit, tmp;

        min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
        min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
        cgroup = memcg->css.cgroup;
        if (!memcg->use_hierarchy)
                goto out;

        while (cgroup->parent) {
                cgroup = cgroup->parent;
                memcg = mem_cgroup_from_cont(cgroup);
                if (!memcg->use_hierarchy)
                        break;
                tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
                min_limit = min(min_limit, tmp);
                tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
                min_memsw_limit = min(min_memsw_limit, tmp);
        }
out:
        *mem_limit = min_limit;
        *memsw_limit = min_memsw_limit;
}
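
/*
 * Illustrative example: for a path root/A/B with use_hierarchy enabled and
 * memory limits of 1G on A and 2G on B, the "hierarchical_memory_limit"
 * reported for B is min(2G, 1G) = 1G, i.e. the tightest limit on the way up.
 */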
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
        struct mem_cgroup *mem;
        int type, name;

        mem = mem_cgroup_from_cont(cont);
        type = MEMFILE_TYPE(event);
        name = MEMFILE_ATTR(event);
        switch (name) {
        case RES_MAX_USAGE:
                if (type == _MEM)
                        res_counter_reset_max(&mem->res);
                else
                        res_counter_reset_max(&mem->memsw);
                break;
        case RES_FAILCNT:
                if (type == _MEM)
                        res_counter_reset_failcnt(&mem->res);
                else
                        res_counter_reset_failcnt(&mem->memsw);
                break;
        }

        return 0;
}
/* For read statistics */
enum {
        MCS_CACHE,
        MCS_RSS,
        MCS_FILE_MAPPED,
        MCS_PGPGIN,
        MCS_PGPGOUT,
        MCS_SWAP,
        MCS_INACTIVE_ANON,
        MCS_ACTIVE_ANON,
        MCS_INACTIVE_FILE,
        MCS_ACTIVE_FILE,
        MCS_UNEVICTABLE,
        NR_MCS_STAT,
};

struct mcs_total_stat {
        s64 stat[NR_MCS_STAT];
};

struct {
        char *local_name;
        char *total_name;
} memcg_stat_strings[NR_MCS_STAT] = {
        {"cache", "total_cache"},
        {"rss", "total_rss"},
        {"mapped_file", "total_mapped_file"},
        {"pgpgin", "total_pgpgin"},
        {"pgpgout", "total_pgpgout"},
        {"swap", "total_swap"},
        {"inactive_anon", "total_inactive_anon"},
        {"active_anon", "total_active_anon"},
        {"inactive_file", "total_inactive_file"},
        {"active_file", "total_active_file"},
        {"unevictable", "total_unevictable"}
};
static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
{
        struct mcs_total_stat *s = data;
        s64 val;

        /* per cpu stat */
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
        s->stat[MCS_CACHE] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
        s->stat[MCS_RSS] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
        s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
        s->stat[MCS_PGPGIN] += val;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
        s->stat[MCS_PGPGOUT] += val;
        if (do_swap_account) {
                val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
                s->stat[MCS_SWAP] += val * PAGE_SIZE;
        }

        /* per zone stat */
        val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
        s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
        val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
        s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
        val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
        s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
        val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
        s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
        val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
        s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
        return 0;
}
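
/*
 * Note on units (derived from the code above): cache/rss/mapped_file/swap
 * and the LRU counters are exported in bytes (page counts multiplied by
 * PAGE_SIZE), while pgpgin/pgpgout remain raw event counts.
 */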
static void
mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
{
        mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
}
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
{
        struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
        struct mcs_total_stat mystat;
        int i;

        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_local_stat(mem_cont, &mystat);

        for (i = 0; i < NR_MCS_STAT; i++) {
                if (i == MCS_SWAP && !do_swap_account)
                        continue;
                cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
        }

        /* Hierarchical information */
        {
                unsigned long long limit, memsw_limit;
                memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
                cb->fill(cb, "hierarchical_memory_limit", limit);
                if (do_swap_account)
                        cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
        }

        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_total_stat(mem_cont, &mystat);
        for (i = 0; i < NR_MCS_STAT; i++) {
                if (i == MCS_SWAP && !do_swap_account)
                        continue;
                cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
        }

#ifdef CONFIG_DEBUG_VM
        cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));

        {
                int nid, zid;
                struct mem_cgroup_per_zone *mz;
                unsigned long recent_rotated[2] = {0, 0};
                unsigned long recent_scanned[2] = {0, 0};

                for_each_online_node(nid)
                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                                mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);

                                recent_rotated[0] +=
                                        mz->reclaim_stat.recent_rotated[0];
                                recent_rotated[1] +=
                                        mz->reclaim_stat.recent_rotated[1];
                                recent_scanned[0] +=
                                        mz->reclaim_stat.recent_scanned[0];
                                recent_scanned[1] +=
                                        mz->reclaim_stat.recent_scanned[1];
                        }
                cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
                cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
                cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
                cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
        }
#endif

        return 0;
}
static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
{
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

        return get_swappiness(memcg);
}
static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
                                       u64 val)
{
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup *parent;

        if (val > 100)
                return -EINVAL;

        if (cgrp->parent == NULL)
                return -EINVAL;

        parent = mem_cgroup_from_cont(cgrp->parent);

        cgroup_lock();

        /* If under hierarchy, only empty-root can set this value */
        if ((parent->use_hierarchy) ||
            (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
                cgroup_unlock();
                return -EINVAL;
        }

        spin_lock(&memcg->reclaim_param_lock);
        memcg->swappiness = val;
        spin_unlock(&memcg->reclaim_param_lock);

        cgroup_unlock();

        return 0;
}
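
/*
 * Usage example (illustrative):
 *        echo 10 > memory.swappiness
 * The accepted range is 0..100, and per the checks above only an
 * "empty root" of a hierarchy (no hierarchical parent, no children while
 * use_hierarchy is set) may change the value.
 */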
static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "max_usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "limit_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "soft_limit_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "failcnt",
                .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "stat",
                .read_map = mem_control_stat_show,
        },
        {
                .name = "force_empty",
                .trigger = mem_cgroup_force_empty_write,
        },
        {
                .name = "use_hierarchy",
                .write_u64 = mem_cgroup_hierarchy_write,
                .read_u64 = mem_cgroup_hierarchy_read,
        },
        {
                .name = "swappiness",
                .read_u64 = mem_cgroup_swappiness_read,
                .write_u64 = mem_cgroup_swappiness_write,
        },
};
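
/*
 * Illustrative view from userspace: each entry above shows up as a
 * "memory."-prefixed control file in every cgroup directory, e.g.
 *        /cgroups/memory/<group>/memory.usage_in_bytes
 *        /cgroups/memory/<group>/memory.stat
 * (the mount point name is just an example).
 */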
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
        {
                .name = "memsw.usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "memsw.max_usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "memsw.limit_in_bytes",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "memsw.failcnt",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
        if (!do_swap_account)
                return 0;
        return cgroup_add_files(cont, ss, memsw_cgroup_files,
                                ARRAY_SIZE(memsw_cgroup_files));
}
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
        return 0;
}
#endif
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
        enum lru_list l;
        int zone, tmp = node;
        /*
         * This routine is called against possible nodes.
         * But it's BUG to call kmalloc() against an offline node.
         *
         * TODO: this routine can waste much memory for nodes which will
         *       never be onlined. It's better to use a memory hotplug
         *       callback function.
         */
        if (!node_state(node, N_NORMAL_MEMORY))
                tmp = -1;
        pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
        if (!pn)
                return 1;

        mem->info.nodeinfo[node] = pn;
        memset(pn, 0, sizeof(*pn));

        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                for_each_lru(l)
                        INIT_LIST_HEAD(&mz->lists[l]);
                mz->usage_in_excess = 0;
                mz->on_tree = false;
                mz->mem = mem;
        }
        return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        kfree(mem->info.nodeinfo[node]);
}
static int mem_cgroup_size(void)
{
        int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
        return sizeof(struct mem_cgroup) + cpustat_size;
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
        struct mem_cgroup *mem;
        int size = mem_cgroup_size();

        if (size < PAGE_SIZE)
                mem = kmalloc(size, GFP_KERNEL);
        else
                mem = vmalloc(size);

        if (mem)
                memset(mem, 0, size);
        return mem;
}
/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free mem_cgroup when
 * it goes down to 0.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *mem)
{
        int node;

        mem_cgroup_remove_from_trees(mem);
        free_css_id(&mem_cgroup_subsys, &mem->css);

        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);

        if (mem_cgroup_size() < PAGE_SIZE)
                kfree(mem);
        else
                vfree(mem);
}

static void mem_cgroup_get(struct mem_cgroup *mem)
{
        atomic_inc(&mem->refcnt);
}

static void mem_cgroup_put(struct mem_cgroup *mem)
{
        if (atomic_dec_and_test(&mem->refcnt)) {
                struct mem_cgroup *parent = parent_mem_cgroup(mem);
                __mem_cgroup_free(mem);
                if (parent)
                        mem_cgroup_put(parent);
        }
}
/*
 * Returns the parent mem_cgroup in the memcgroup hierarchy with hierarchy enabled.
 */
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
{
        if (!mem->res.parent)
                return NULL;
        return mem_cgroup_from_res_counter(mem->res.parent, res);
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
        if (!mem_cgroup_disabled() && really_do_swap_account)
                do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif
static int mem_cgroup_soft_limit_tree_init(void)
{
        struct mem_cgroup_tree_per_node *rtpn;
        struct mem_cgroup_tree_per_zone *rtpz;
        int tmp, node, zone;

        for_each_node_state(node, N_POSSIBLE) {
                tmp = node;
                if (!node_state(node, N_NORMAL_MEMORY))
                        tmp = -1;
                rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
                if (!rtpn)
                        return 1;

                soft_limit_tree.rb_tree_per_node[node] = rtpn;

                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                        rtpz = &rtpn->rb_tree_per_zone[zone];
                        rtpz->rb_root = RB_ROOT;
                        spin_lock_init(&rtpz->lock);
                }
        }
        return 0;
}
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
        struct mem_cgroup *mem, *parent;
        long error = -ENOMEM;
        int node;

        mem = mem_cgroup_alloc();
        if (!mem)
                return ERR_PTR(error);

        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;

        /* root ? */
        if (cont->parent == NULL) {
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
                root_mem_cgroup = mem;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
                for_each_possible_cpu(cpu) {
                        struct memcg_stock_pcp *stock =
                                                &per_cpu(memcg_stock, cpu);
                        INIT_WORK(&stock->work, drain_local_stock);
                }
                hotcpu_notifier(memcg_stock_cpu_callback, 0);
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;
        }

        if (parent && parent->use_hierarchy) {
                res_counter_init(&mem->res, &parent->res);
                res_counter_init(&mem->memsw, &parent->memsw);
                /*
                 * We increment refcnt of the parent to ensure that we can
                 * safely access it on res_counter_charge/uncharge.
                 * This refcnt will be decremented when freeing this
                 * mem_cgroup (see mem_cgroup_put).
                 */
                mem_cgroup_get(parent);
        } else {
                res_counter_init(&mem->res, NULL);
                res_counter_init(&mem->memsw, NULL);
        }
        mem->last_scanned_child = 0;
        spin_lock_init(&mem->reclaim_param_lock);

        if (parent)
                mem->swappiness = get_swappiness(parent);
        atomic_set(&mem->refcnt, 1);
        return &mem->css;
free_out:
        __mem_cgroup_free(mem);
        root_mem_cgroup = NULL;
        return ERR_PTR(error);
}
static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                        struct cgroup *cont)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

        return mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

        mem_cgroup_put(mem);
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        int ret;

        ret = cgroup_add_files(cont, ss, mem_cgroup_files,
                                ARRAY_SIZE(mem_cgroup_files));

        if (!ret)
                ret = register_memsw_files(cont, ss);

        return ret;
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
                                struct task_struct *p,
                                bool threadgroup)
{
        mutex_lock(&memcg_tasklist);
        /*
         * FIXME: It's better to move charges of this process from the old
         * memcg to the new memcg. But it's just on the TODO list now.
         */
        mutex_unlock(&memcg_tasklist);
}
struct cgroup_subsys mem_cgroup_subsys = {
        .name = "memory",
        .subsys_id = mem_cgroup_subsys_id,
        .create = mem_cgroup_create,
        .pre_destroy = mem_cgroup_pre_destroy,
        .destroy = mem_cgroup_destroy,
        .populate = mem_cgroup_populate,
        .attach = mem_cgroup_move_task,
        .early_init = 0,
        .use_id = 1,
};
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static int __init disable_swap_account(char *s)
{
        really_do_swap_account = 0;
        return 1;
}
__setup("noswapaccount", disable_swap_account);
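
/*
 * Usage example (illustrative): booting with "noswapaccount" on the kernel
 * command line clears really_do_swap_account, so enable_swap_cgroup() never
 * turns on do_swap_account even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y.
 */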