/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* to remember the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif
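
/*
 * really_do_swap_account only records the build/boot-time choice;
 * do_swap_account is what the rest of this file tests at runtime, and it is
 * only enabled when the memory cgroup itself is enabled (see the comment
 * above its definition).
 */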
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,		/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};
enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET	(1024)
struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long targets[MEM_CGROUP_NTARGETS];
};
struct mem_cgroup_reclaim_iter {
	/* css_id of the last scanned hierarchy member */
	int position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};
/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		count[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of        */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
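
/*
 * Usage sketch (illustrative only): the per-zone, per-memcg LRU page counts
 * are read as
 *
 *	nr = MEM_CGROUP_ZSTAT(mz, LRU_INACTIVE_ANON);
 *
 * and are updated under zone->lru_lock by the mem_cgroup_lru_* helpers
 * further down in this file.
 */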
struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};
/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};
struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	struct tcp_memcontrol tcp_mem;
#endif
};
/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};
/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
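
/*
 * Illustrative example of the cft->private encoding above:
 * MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type into the high
 * 16 bits and the res_counter member into the low 16 bits, so the file
 * read/write handlers can recover them with MEMFILE_TYPE() and
 * MEMFILE_ATTR().
 */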
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
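
/*
 * Illustrative use of the reclaim flags (see mem_cgroup_reclaim() and
 * mem_cgroup_do_charge() below): a charge that failed against the memsw
 * counter adds MEM_CGROUP_RECLAIM_NOSWAP so that reclaim does not try to
 * swap, and a limit shrinker driven by userspace passes
 * MEM_CGROUP_RECLAIM_SHRINK so it can bail out after minimal progress.
 */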
static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
/* Writing them here to avoid exposing memcg's inner layout */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
#ifdef CONFIG_INET
#include <net/sock.h>

static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
void sock_update_memcg(struct sock *sk)
{
	if (static_branch(&memcg_socket_limit_enabled)) {
		struct mem_cgroup *memcg;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			mem_cgroup_get(sk->sk_cgrp->memcg);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		if (!mem_cgroup_is_root(memcg)) {
			mem_cgroup_get(memcg);
			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		mem_cgroup_put(memcg);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
#endif /* CONFIG_INET */
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}
static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}
static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
	spin_unlock(&mctz->lock);
}
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}
static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}
/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement a periodic synchronization of the counter in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because he accounts memory. Even if we provide a quick-and-fuzzy
 * read, we always have to visit all online cpus and make the sum. So, for
 * now, unnecessary synchronization is not implemented. (just implemented
 * for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

	preempt_enable();
}
static unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	for_each_lru(l) {
		if (BIT(l) & lru_mask)
			ret += MEM_CGROUP_ZSTAT(mz, l);
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_HIGH_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return total;
}
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
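
/*
 * Example (illustrative): with THRESHOLDS_EVENTS_TARGET == 128, roughly
 * every 128 charge/uncharge events on a CPU the call
 *
 *	mem_cgroup_event_ratelimit(memcg, MEM_CGROUP_TARGET_THRESH)
 *
 * returns true, and memcg_check_events() below then runs the threshold
 * notifiers; the soft limit and numainfo targets fire correspondingly
 * less often.
 */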
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit, do_numainfo;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}
struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner's may be being moved to other
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			break;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg = NULL;
	int id = 0;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		id = css_id(&prev->css);

	if (prev && prev != root)
		css_put(&prev->css);

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			return NULL;
		return root;
	}

	while (!memcg) {
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
		struct cgroup_subsys_state *css;

		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation)
				return NULL;
			id = iter->position;
		}

		rcu_read_lock();
		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
		if (css) {
			if (css == &root->css || css_tryget(css))
				memcg = container_of(css,
						     struct mem_cgroup, css);
		} else
			id = 0;
		rcu_read_unlock();

		if (reclaim) {
			iter->position = id;
			if (!css)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}

		if (prev && !css)
			return NULL;
	}
	return memcg;
}
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}
/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
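
/*
 * Typical use (illustrative), as in mem_cgroup_count_children() below:
 *
 *	struct mem_cgroup *iter;
 *	int num = 0;
 *
 *	for_each_mem_cgroup_tree(iter, memcg)
 *		num++;
 */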
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!mm)
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);
/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @mem: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @mem. This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	return &mz->lruvec;
}
/*
 * Following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routine of global LRU independently from memcg.
 * What we have to take care of here is validness of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */

/**
 * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
 * @zone: zone of the page
 * @page: the page
 * @lru: the lru list the page is added to
 *
 * This function accounts for @page being added to @lru, and returns
 * the lruvec for the given @zone and the memcg @page is charged to.
 *
 * The callsite is then responsible for physically linking the page to
 * the returned lruvec->lists[@lru].
 */
struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
				       enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	/*
	 *	SetPageLRU			SetPageCgroupUsed
	 *	smp_mb				smp_mb
	 *	PageCgroupUsed && add to memcg LRU PageLRU && add to memcg LRU
	 *
	 * Ensure that one of the two sides adds the page to the memcg
	 * LRU during a race.
	 */
	smp_mb();
	/*
	 * If the page is uncharged, it may be freed soon, but it
	 * could also be swap cache (readahead, swapoff) that needs to
	 * be reclaimable in the future. root_mem_cgroup will babysit
	 * it for the time being.
	 */
	if (PageCgroupUsed(pc)) {
		/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
		smp_rmb();
		memcg = pc->mem_cgroup;
		SetPageCgroupAcctLRU(pc);
	} else
		memcg = root_mem_cgroup;
	mz = page_cgroup_zoneinfo(memcg, page);
	/* compound_order() is stabilized through lru_lock */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	return &mz->lruvec;
}
1074 * mem_cgroup_lru_del_list - account for removing an lru page
1078 * This function accounts for @page being removed from @lru.
1080 * The callsite is then responsible for physically unlinking
1083 void mem_cgroup_lru_del_list(struct page
*page
, enum lru_list lru
)
1085 struct mem_cgroup_per_zone
*mz
;
1086 struct mem_cgroup
*memcg
;
1087 struct page_cgroup
*pc
;
1089 if (mem_cgroup_disabled())
1092 pc
= lookup_page_cgroup(page
);
1094 * root_mem_cgroup babysits uncharged LRU pages, but
1095 * PageCgroupUsed is cleared when the page is about to get
1096 * freed. PageCgroupAcctLRU remembers whether the
1097 * LRU-accounting happened against pc->mem_cgroup or
1100 if (TestClearPageCgroupAcctLRU(pc
)) {
1101 VM_BUG_ON(!pc
->mem_cgroup
);
1102 memcg
= pc
->mem_cgroup
;
1104 memcg
= root_mem_cgroup
;
1105 mz
= page_cgroup_zoneinfo(memcg
, page
);
1106 /* huge page split is done under lru_lock. so, we have no races. */
1107 MEM_CGROUP_ZSTAT(mz
, lru
) -= 1 << compound_order(page
);
1110 void mem_cgroup_lru_del(struct page
*page
)
1112 mem_cgroup_lru_del_list(page
, page_lru(page
));
1116 * mem_cgroup_lru_move_lists - account for moving a page between lrus
1117 * @zone: zone of the page
1119 * @from: current lru
1122 * This function accounts for @page being moved between the lrus @from
1123 * and @to, and returns the lruvec for the given @zone and the memcg
1124 * @page is charged to.
1126 * The callsite is then responsible for physically relinking
1127 * @page->lru to the returned lruvec->lists[@to].
1129 struct lruvec
*mem_cgroup_lru_move_lists(struct zone
*zone
,
1134 /* XXX: Optimize this, especially for @from == @to */
1135 mem_cgroup_lru_del_list(page
, from
);
1136 return mem_cgroup_lru_add_list(zone
, page
, to
);
 * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
 * while it's linked to the LRU because the page may be reused after it's fully
 * uncharged. To handle that, unlink page_cgroup from the LRU when charging it
 * again. It's done under lock_page and expected that zone->lru_lock is never
 * held.
1145 static void mem_cgroup_lru_del_before_commit(struct page
*page
)
1148 unsigned long flags
;
1149 struct zone
*zone
= page_zone(page
);
1150 struct page_cgroup
*pc
= lookup_page_cgroup(page
);
 * Doing this check without taking ->lru_lock seems wrong but this
 * is safe. Because if page_cgroup's USED bit is unset, the page
 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
 * set, the commit after this will fail, anyway.
 * All of this charge/uncharge is done under some mutual exclusion.
 * So, we don't need to take care of changes in the USED bit.
1160 if (likely(!PageLRU(page
)))
1163 spin_lock_irqsave(&zone
->lru_lock
, flags
);
1164 lru
= page_lru(page
);
1166 * The uncharged page could still be registered to the LRU of
1167 * the stale pc->mem_cgroup.
1169 * As pc->mem_cgroup is about to get overwritten, the old LRU
1170 * accounting needs to be taken care of. Let root_mem_cgroup
1171 * babysit the page until the new memcg is responsible for it.
1173 * The PCG_USED bit is guarded by lock_page() as the page is
1174 * swapcache/pagecache.
1176 if (PageLRU(page
) && PageCgroupAcctLRU(pc
) && !PageCgroupUsed(pc
)) {
1177 del_page_from_lru_list(zone
, page
, lru
);
1178 add_page_to_lru_list(zone
, page
, lru
);
1180 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
1183 static void mem_cgroup_lru_add_after_commit(struct page
*page
)
1186 unsigned long flags
;
1187 struct zone
*zone
= page_zone(page
);
1188 struct page_cgroup
*pc
= lookup_page_cgroup(page
);
1191 * SetPageLRU SetPageCgroupUsed
1193 * PageCgroupUsed && add to memcg LRU PageLRU && add to memcg LRU
1195 * Ensure that one of the two sides adds the page to the memcg
1196 * LRU during a race.
1199 /* taking care of that the page is added to LRU while we commit it */
1200 if (likely(!PageLRU(page
)))
1202 spin_lock_irqsave(&zone
->lru_lock
, flags
);
1203 lru
= page_lru(page
);
1205 * If the page is not on the LRU, someone will soon put it
1206 * there. If it is, and also already accounted for on the
1207 * memcg-side, it must be on the right lruvec as setting
1208 * pc->mem_cgroup and PageCgroupUsed is properly ordered.
1209 * Otherwise, root_mem_cgroup has been babysitting the page
1210 * during the charge. Move it to the new memcg now.
1212 if (PageLRU(page
) && !PageCgroupAcctLRU(pc
)) {
1213 del_page_from_lru_list(zone
, page
, lru
);
1214 add_page_to_lru_list(zone
, page
, lru
);
1216 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
1220 * Checks whether given mem is same or in the root_mem_cgroup's
1223 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup
*root_memcg
,
1224 struct mem_cgroup
*memcg
)
1226 if (root_memcg
!= memcg
) {
1227 return (root_memcg
->use_hierarchy
&&
1228 css_is_ancestor(&memcg
->css
, &root_memcg
->css
));
1234 int task_in_mem_cgroup(struct task_struct
*task
, const struct mem_cgroup
*memcg
)
1237 struct mem_cgroup
*curr
= NULL
;
1238 struct task_struct
*p
;
1240 p
= find_lock_task_mm(task
);
1243 curr
= try_get_mem_cgroup_from_mm(p
->mm
);
1248 * We should check use_hierarchy of "memcg" not "curr". Because checking
1249 * use_hierarchy of "curr" here make this function true if hierarchy is
1250 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
1251 * hierarchy(even if use_hierarchy is disabled in "memcg").
1253 ret
= mem_cgroup_same_or_subtree(memcg
, curr
);
1254 css_put(&curr
->css
);
1258 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup
*memcg
, struct zone
*zone
)
1260 unsigned long inactive_ratio
;
1261 int nid
= zone_to_nid(zone
);
1262 int zid
= zone_idx(zone
);
1263 unsigned long inactive
;
1264 unsigned long active
;
1267 inactive
= mem_cgroup_zone_nr_lru_pages(memcg
, nid
, zid
,
1268 BIT(LRU_INACTIVE_ANON
));
1269 active
= mem_cgroup_zone_nr_lru_pages(memcg
, nid
, zid
,
1270 BIT(LRU_ACTIVE_ANON
));
1272 gb
= (inactive
+ active
) >> (30 - PAGE_SHIFT
);
1274 inactive_ratio
= int_sqrt(10 * gb
);
1278 return inactive
* inactive_ratio
< active
;
1281 int mem_cgroup_inactive_file_is_low(struct mem_cgroup
*memcg
, struct zone
*zone
)
1283 unsigned long active
;
1284 unsigned long inactive
;
1285 int zid
= zone_idx(zone
);
1286 int nid
= zone_to_nid(zone
);
1288 inactive
= mem_cgroup_zone_nr_lru_pages(memcg
, nid
, zid
,
1289 BIT(LRU_INACTIVE_FILE
));
1290 active
= mem_cgroup_zone_nr_lru_pages(memcg
, nid
, zid
,
1291 BIT(LRU_ACTIVE_FILE
));
1293 return (active
> inactive
);
1296 struct zone_reclaim_stat
*mem_cgroup_get_reclaim_stat(struct mem_cgroup
*memcg
,
1299 int nid
= zone_to_nid(zone
);
1300 int zid
= zone_idx(zone
);
1301 struct mem_cgroup_per_zone
*mz
= mem_cgroup_zoneinfo(memcg
, nid
, zid
);
1303 return &mz
->reclaim_stat
;
1306 struct zone_reclaim_stat
*
1307 mem_cgroup_get_reclaim_stat_from_page(struct page
*page
)
1309 struct page_cgroup
*pc
;
1310 struct mem_cgroup_per_zone
*mz
;
1312 if (mem_cgroup_disabled())
1315 pc
= lookup_page_cgroup(page
);
1316 if (!PageCgroupUsed(pc
))
1318 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1320 mz
= page_cgroup_zoneinfo(pc
->mem_cgroup
, page
);
1321 return &mz
->reclaim_stat
;
#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)
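
/*
 * Example (illustrative): when res_counter_charge() fails it reports the
 * counter that hit its limit through @fail_res, and
 *
 *	mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
 *
 * recovers the memcg owning that counter (see mem_cgroup_do_charge() below).
 */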
1328 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1329 * @mem: the memory cgroup
1331 * Returns the maximum amount of memory @mem can be charged with, in
1334 static unsigned long mem_cgroup_margin(struct mem_cgroup
*memcg
)
1336 unsigned long long margin
;
1338 margin
= res_counter_margin(&memcg
->res
);
1339 if (do_swap_account
)
1340 margin
= min(margin
, res_counter_margin(&memcg
->memsw
));
1341 return margin
>> PAGE_SHIFT
;
1344 int mem_cgroup_swappiness(struct mem_cgroup
*memcg
)
1346 struct cgroup
*cgrp
= memcg
->css
.cgroup
;
1349 if (cgrp
->parent
== NULL
)
1350 return vm_swappiness
;
1352 return memcg
->swappiness
;
1355 static void mem_cgroup_start_move(struct mem_cgroup
*memcg
)
1360 spin_lock(&memcg
->pcp_counter_lock
);
1361 for_each_online_cpu(cpu
)
1362 per_cpu(memcg
->stat
->count
[MEM_CGROUP_ON_MOVE
], cpu
) += 1;
1363 memcg
->nocpu_base
.count
[MEM_CGROUP_ON_MOVE
] += 1;
1364 spin_unlock(&memcg
->pcp_counter_lock
);
1370 static void mem_cgroup_end_move(struct mem_cgroup
*memcg
)
1377 spin_lock(&memcg
->pcp_counter_lock
);
1378 for_each_online_cpu(cpu
)
1379 per_cpu(memcg
->stat
->count
[MEM_CGROUP_ON_MOVE
], cpu
) -= 1;
1380 memcg
->nocpu_base
.count
[MEM_CGROUP_ON_MOVE
] -= 1;
1381 spin_unlock(&memcg
->pcp_counter_lock
);
/*
 * Two routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() -  checking whether a cgroup is mc.from or not. This
 *			   is used for avoiding a race in accounting. If true,
 *			   pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
 *			     under the hierarchy of moving cgroups. This is
 *			     for waiting at high memory pressure caused by
 *			     "move".
 */
1396 static bool mem_cgroup_stealed(struct mem_cgroup
*memcg
)
1398 VM_BUG_ON(!rcu_read_lock_held());
1399 return this_cpu_read(memcg
->stat
->count
[MEM_CGROUP_ON_MOVE
]) > 0;
1402 static bool mem_cgroup_under_move(struct mem_cgroup
*memcg
)
1404 struct mem_cgroup
*from
;
1405 struct mem_cgroup
*to
;
1408 * Unlike task_move routines, we access mc.to, mc.from not under
1409 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1411 spin_lock(&mc
.lock
);
1417 ret
= mem_cgroup_same_or_subtree(memcg
, from
)
1418 || mem_cgroup_same_or_subtree(memcg
, to
);
1420 spin_unlock(&mc
.lock
);
1424 static bool mem_cgroup_wait_acct_move(struct mem_cgroup
*memcg
)
1426 if (mc
.moving_task
&& current
!= mc
.moving_task
) {
1427 if (mem_cgroup_under_move(memcg
)) {
1429 prepare_to_wait(&mc
.waitq
, &wait
, TASK_INTERRUPTIBLE
);
1430 /* moving charge context might have finished. */
1433 finish_wait(&mc
.waitq
, &wait
);
1441 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1442 * @memcg: The memory cgroup that went over limit
1443 * @p: Task that is going to be killed
1445 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1448 void mem_cgroup_print_oom_info(struct mem_cgroup
*memcg
, struct task_struct
*p
)
1450 struct cgroup
*task_cgrp
;
1451 struct cgroup
*mem_cgrp
;
1453 * Need a buffer in BSS, can't rely on allocations. The code relies
1454 * on the assumption that OOM is serialized for memory controller.
1455 * If this assumption is broken, revisit this code.
1457 static char memcg_name
[PATH_MAX
];
1466 mem_cgrp
= memcg
->css
.cgroup
;
1467 task_cgrp
= task_cgroup(p
, mem_cgroup_subsys_id
);
1469 ret
= cgroup_path(task_cgrp
, memcg_name
, PATH_MAX
);
1472 * Unfortunately, we are unable to convert to a useful name
1473 * But we'll still print out the usage information
1480 printk(KERN_INFO
"Task in %s killed", memcg_name
);
1483 ret
= cgroup_path(mem_cgrp
, memcg_name
, PATH_MAX
);
1491 * Continues from above, so we don't need an KERN_ level
1493 printk(KERN_CONT
" as a result of limit of %s\n", memcg_name
);
1496 printk(KERN_INFO
"memory: usage %llukB, limit %llukB, failcnt %llu\n",
1497 res_counter_read_u64(&memcg
->res
, RES_USAGE
) >> 10,
1498 res_counter_read_u64(&memcg
->res
, RES_LIMIT
) >> 10,
1499 res_counter_read_u64(&memcg
->res
, RES_FAILCNT
));
1500 printk(KERN_INFO
"memory+swap: usage %llukB, limit %llukB, "
1502 res_counter_read_u64(&memcg
->memsw
, RES_USAGE
) >> 10,
1503 res_counter_read_u64(&memcg
->memsw
, RES_LIMIT
) >> 10,
1504 res_counter_read_u64(&memcg
->memsw
, RES_FAILCNT
));
1508 * This function returns the number of memcg under hierarchy tree. Returns
1509 * 1(self count) if no children.
1511 static int mem_cgroup_count_children(struct mem_cgroup
*memcg
)
1514 struct mem_cgroup
*iter
;
1516 for_each_mem_cgroup_tree(iter
, memcg
)
1522 * Return the memory (and swap, if configured) limit for a memcg.
1524 u64
mem_cgroup_get_limit(struct mem_cgroup
*memcg
)
1529 limit
= res_counter_read_u64(&memcg
->res
, RES_LIMIT
);
1530 limit
+= total_swap_pages
<< PAGE_SHIFT
;
1532 memsw
= res_counter_read_u64(&memcg
->memsw
, RES_LIMIT
);
1534 * If memsw is finite and limits the amount of swap space available
1535 * to this memcg, return that limit.
1537 return min(limit
, memsw
);
1540 static unsigned long mem_cgroup_reclaim(struct mem_cgroup
*memcg
,
1542 unsigned long flags
)
1544 unsigned long total
= 0;
1545 bool noswap
= false;
1548 if (flags
& MEM_CGROUP_RECLAIM_NOSWAP
)
1550 if (!(flags
& MEM_CGROUP_RECLAIM_SHRINK
) && memcg
->memsw_is_minimum
)
1553 for (loop
= 0; loop
< MEM_CGROUP_MAX_RECLAIM_LOOPS
; loop
++) {
1555 drain_all_stock_async(memcg
);
1556 total
+= try_to_free_mem_cgroup_pages(memcg
, gfp_mask
, noswap
);
1558 * Allow limit shrinkers, which are triggered directly
1559 * by userspace, to catch signals and stop reclaim
1560 * after minimal progress, regardless of the margin.
1562 if (total
&& (flags
& MEM_CGROUP_RECLAIM_SHRINK
))
1564 if (mem_cgroup_margin(memcg
))
1567 * If nothing was reclaimed after two attempts, there
1568 * may be no reclaimable pages in this hierarchy.
1577 * test_mem_cgroup_node_reclaimable
1578 * @mem: the target memcg
1579 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file only information.
1582 * This function returns whether the specified memcg contains any
1583 * reclaimable pages on a node. Returns true if there are any reclaimable
1584 * pages in the node.
1586 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup
*memcg
,
1587 int nid
, bool noswap
)
1589 if (mem_cgroup_node_nr_lru_pages(memcg
, nid
, LRU_ALL_FILE
))
1591 if (noswap
|| !total_swap_pages
)
1593 if (mem_cgroup_node_nr_lru_pages(memcg
, nid
, LRU_ALL_ANON
))
1598 #if MAX_NUMNODES > 1
1601 * Always updating the nodemask is not very good - even if we have an empty
1602 * list or the wrong list here, we can start from some node and traverse all
1603 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1606 static void mem_cgroup_may_update_nodemask(struct mem_cgroup
*memcg
)
1610 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1611 * pagein/pageout changes since the last update.
1613 if (!atomic_read(&memcg
->numainfo_events
))
1615 if (atomic_inc_return(&memcg
->numainfo_updating
) > 1)
1618 /* make a nodemask where this memcg uses memory from */
1619 memcg
->scan_nodes
= node_states
[N_HIGH_MEMORY
];
1621 for_each_node_mask(nid
, node_states
[N_HIGH_MEMORY
]) {
1623 if (!test_mem_cgroup_node_reclaimable(memcg
, nid
, false))
1624 node_clear(nid
, memcg
->scan_nodes
);
1627 atomic_set(&memcg
->numainfo_events
, 0);
1628 atomic_set(&memcg
->numainfo_updating
, 0);
/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, it will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * For now, we use round-robin. A better algorithm is welcome.
 */
1643 int mem_cgroup_select_victim_node(struct mem_cgroup
*memcg
)
1647 mem_cgroup_may_update_nodemask(memcg
);
1648 node
= memcg
->last_scanned_node
;
1650 node
= next_node(node
, memcg
->scan_nodes
);
1651 if (node
== MAX_NUMNODES
)
1652 node
= first_node(memcg
->scan_nodes
);
	 * We call this when we hit the limit, not when pages are added to the
	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE or
	 * the memcg is too small and all pages are not on the LRU. In that
	 * case, we use the current node.
1659 if (unlikely(node
== MAX_NUMNODES
))
1660 node
= numa_node_id();
1662 memcg
->last_scanned_node
= node
;
/*
 * Check all nodes whether they contain reclaimable pages or not.
 * For a quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to do a double check.
 */
1672 bool mem_cgroup_reclaimable(struct mem_cgroup
*memcg
, bool noswap
)
1677 * quick check...making use of scan_node.
1678 * We can skip unused nodes.
1680 if (!nodes_empty(memcg
->scan_nodes
)) {
1681 for (nid
= first_node(memcg
->scan_nodes
);
1683 nid
= next_node(nid
, memcg
->scan_nodes
)) {
1685 if (test_mem_cgroup_node_reclaimable(memcg
, nid
, noswap
))
1690 * Check rest of nodes.
1692 for_each_node_state(nid
, N_HIGH_MEMORY
) {
1693 if (node_isset(nid
, memcg
->scan_nodes
))
1695 if (test_mem_cgroup_node_reclaimable(memcg
, nid
, noswap
))
1702 int mem_cgroup_select_victim_node(struct mem_cgroup
*memcg
)
1707 bool mem_cgroup_reclaimable(struct mem_cgroup
*memcg
, bool noswap
)
1709 return test_mem_cgroup_node_reclaimable(memcg
, 0, noswap
);
1713 static int mem_cgroup_soft_reclaim(struct mem_cgroup
*root_memcg
,
1716 unsigned long *total_scanned
)
1718 struct mem_cgroup
*victim
= NULL
;
1721 unsigned long excess
;
1722 unsigned long nr_scanned
;
1723 struct mem_cgroup_reclaim_cookie reclaim
= {
1728 excess
= res_counter_soft_limit_excess(&root_memcg
->res
) >> PAGE_SHIFT
;
1731 victim
= mem_cgroup_iter(root_memcg
, victim
, &reclaim
);
			/*
			 * If we have not been able to reclaim
			 * anything, it might be because there are
			 * no reclaimable pages under this hierarchy.
			 */
			/*
			 * We want to do more targeted reclaim.
			 * excess >> 2 is not too excessive so as to
			 * reclaim too much, nor too little so that we keep
			 * coming back to reclaim from this cgroup.
			 */
1748 if (total
>= (excess
>> 2) ||
1749 (loop
> MEM_CGROUP_MAX_RECLAIM_LOOPS
))
1754 if (!mem_cgroup_reclaimable(victim
, false))
1756 total
+= mem_cgroup_shrink_node_zone(victim
, gfp_mask
, false,
1758 *total_scanned
+= nr_scanned
;
1759 if (!res_counter_soft_limit_excess(&root_memcg
->res
))
1762 mem_cgroup_iter_break(root_memcg
, victim
);
1767 * Check OOM-Killer is already running under our hierarchy.
1768 * If someone is running, return false.
1769 * Has to be called with memcg_oom_lock
1771 static bool mem_cgroup_oom_lock(struct mem_cgroup
*memcg
)
1773 struct mem_cgroup
*iter
, *failed
= NULL
;
1775 for_each_mem_cgroup_tree(iter
, memcg
) {
1776 if (iter
->oom_lock
) {
1778 * this subtree of our hierarchy is already locked
1779 * so we cannot give a lock.
1782 mem_cgroup_iter_break(memcg
, iter
);
1785 iter
->oom_lock
= true;
1792 * OK, we failed to lock the whole subtree so we have to clean up
1793 * what we set up to the failing subtree
1795 for_each_mem_cgroup_tree(iter
, memcg
) {
1796 if (iter
== failed
) {
1797 mem_cgroup_iter_break(memcg
, iter
);
1800 iter
->oom_lock
= false;
1806 * Has to be called with memcg_oom_lock
1808 static int mem_cgroup_oom_unlock(struct mem_cgroup
*memcg
)
1810 struct mem_cgroup
*iter
;
1812 for_each_mem_cgroup_tree(iter
, memcg
)
1813 iter
->oom_lock
= false;
1817 static void mem_cgroup_mark_under_oom(struct mem_cgroup
*memcg
)
1819 struct mem_cgroup
*iter
;
1821 for_each_mem_cgroup_tree(iter
, memcg
)
1822 atomic_inc(&iter
->under_oom
);
1825 static void mem_cgroup_unmark_under_oom(struct mem_cgroup
*memcg
)
1827 struct mem_cgroup
*iter
;
1830 * When a new child is created while the hierarchy is under oom,
1831 * mem_cgroup_oom_lock() may not be called. We have to use
1832 * atomic_add_unless() here.
1834 for_each_mem_cgroup_tree(iter
, memcg
)
1835 atomic_add_unless(&iter
->under_oom
, -1, 0);
1838 static DEFINE_SPINLOCK(memcg_oom_lock
);
1839 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq
);
1841 struct oom_wait_info
{
1842 struct mem_cgroup
*mem
;
1846 static int memcg_oom_wake_function(wait_queue_t
*wait
,
1847 unsigned mode
, int sync
, void *arg
)
1849 struct mem_cgroup
*wake_memcg
= (struct mem_cgroup
*)arg
,
1851 struct oom_wait_info
*oom_wait_info
;
1853 oom_wait_info
= container_of(wait
, struct oom_wait_info
, wait
);
1854 oom_wait_memcg
= oom_wait_info
->mem
;
1857 * Both of oom_wait_info->mem and wake_mem are stable under us.
1858 * Then we can use css_is_ancestor without taking care of RCU.
1860 if (!mem_cgroup_same_or_subtree(oom_wait_memcg
, wake_memcg
)
1861 && !mem_cgroup_same_or_subtree(wake_memcg
, oom_wait_memcg
))
1863 return autoremove_wake_function(wait
, mode
, sync
, arg
);
1866 static void memcg_wakeup_oom(struct mem_cgroup
*memcg
)
1868 /* for filtering, pass "memcg" as argument. */
1869 __wake_up(&memcg_oom_waitq
, TASK_NORMAL
, 0, memcg
);
1872 static void memcg_oom_recover(struct mem_cgroup
*memcg
)
1874 if (memcg
&& atomic_read(&memcg
->under_oom
))
1875 memcg_wakeup_oom(memcg
);
1879 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
1881 bool mem_cgroup_handle_oom(struct mem_cgroup
*memcg
, gfp_t mask
)
1883 struct oom_wait_info owait
;
1884 bool locked
, need_to_kill
;
1887 owait
.wait
.flags
= 0;
1888 owait
.wait
.func
= memcg_oom_wake_function
;
1889 owait
.wait
.private = current
;
1890 INIT_LIST_HEAD(&owait
.wait
.task_list
);
1891 need_to_kill
= true;
1892 mem_cgroup_mark_under_oom(memcg
);
1894 /* At first, try to OOM lock hierarchy under memcg.*/
1895 spin_lock(&memcg_oom_lock
);
1896 locked
= mem_cgroup_oom_lock(memcg
);
1898 * Even if signal_pending(), we can't quit charge() loop without
1899 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1900 * under OOM is always welcomed, use TASK_KILLABLE here.
1902 prepare_to_wait(&memcg_oom_waitq
, &owait
.wait
, TASK_KILLABLE
);
1903 if (!locked
|| memcg
->oom_kill_disable
)
1904 need_to_kill
= false;
1906 mem_cgroup_oom_notify(memcg
);
1907 spin_unlock(&memcg_oom_lock
);
1910 finish_wait(&memcg_oom_waitq
, &owait
.wait
);
1911 mem_cgroup_out_of_memory(memcg
, mask
);
1914 finish_wait(&memcg_oom_waitq
, &owait
.wait
);
1916 spin_lock(&memcg_oom_lock
);
1918 mem_cgroup_oom_unlock(memcg
);
1919 memcg_wakeup_oom(memcg
);
1920 spin_unlock(&memcg_oom_lock
);
1922 mem_cgroup_unmark_under_oom(memcg
);
1924 if (test_thread_flag(TIF_MEMDIE
) || fatal_signal_pending(current
))
1926 /* Give chance to dying process */
1927 schedule_timeout_uninterruptible(1);
/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup member but
 * it tends to be costly. But considering some conditions, we don't need
 * to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to the radix-tree.
 * There is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
 * if there is a race with "uncharge". Statistics itself is properly handled
 * by flags.
 *
 * Considering "move", this is the only case where we see a race. To make the
 * race small, we check the MEM_CGROUP_ON_MOVE percpu value and detect a
 * possible race condition. If there is one, we take a lock.
 */
1955 void mem_cgroup_update_page_stat(struct page
*page
,
1956 enum mem_cgroup_page_stat_item idx
, int val
)
1958 struct mem_cgroup
*memcg
;
1959 struct page_cgroup
*pc
= lookup_page_cgroup(page
);
1960 bool need_unlock
= false;
1961 unsigned long uninitialized_var(flags
);
1963 if (mem_cgroup_disabled())
1967 memcg
= pc
->mem_cgroup
;
1968 if (unlikely(!memcg
|| !PageCgroupUsed(pc
)))
1970 /* pc->mem_cgroup is unstable ? */
1971 if (unlikely(mem_cgroup_stealed(memcg
)) || PageTransHuge(page
)) {
1972 /* take a lock against to access pc->mem_cgroup */
1973 move_lock_page_cgroup(pc
, &flags
);
1975 memcg
= pc
->mem_cgroup
;
1976 if (!memcg
|| !PageCgroupUsed(pc
))
1981 case MEMCG_NR_FILE_MAPPED
:
1983 SetPageCgroupFileMapped(pc
);
1984 else if (!page_mapped(page
))
1985 ClearPageCgroupFileMapped(pc
);
1986 idx
= MEM_CGROUP_STAT_FILE_MAPPED
;
1992 this_cpu_add(memcg
->stat
->count
[idx
], val
);
1995 if (unlikely(need_unlock
))
1996 move_unlock_page_cgroup(pc
, &flags
);
2000 EXPORT_SYMBOL(mem_cgroup_update_page_stat
);
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
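
/*
 * Illustrative flow: __mem_cgroup_try_charge() charges CHARGE_BATCH pages at
 * once even when a single page is wanted, parks the surplus in the per-cpu
 * "stock" below via refill_stock(), and later charges of the same memcg on
 * this CPU are then served from consume_stock() without touching the
 * res_counter.
 */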
2007 struct memcg_stock_pcp
{
2008 struct mem_cgroup
*cached
; /* this never be root cgroup */
2009 unsigned int nr_pages
;
2010 struct work_struct work
;
2011 unsigned long flags
;
2012 #define FLUSHING_CACHED_CHARGE (0)
2014 static DEFINE_PER_CPU(struct memcg_stock_pcp
, memcg_stock
);
2015 static DEFINE_MUTEX(percpu_charge_mutex
);
2018 * Try to consume stocked charge on this cpu. If success, one page is consumed
2019 * from local stock and true is returned. If the stock is 0 or charges from a
2020 * cgroup which is not current target, returns false. This stock will be
2023 static bool consume_stock(struct mem_cgroup
*memcg
)
2025 struct memcg_stock_pcp
*stock
;
2028 stock
= &get_cpu_var(memcg_stock
);
2029 if (memcg
== stock
->cached
&& stock
->nr_pages
)
2031 else /* need to call res_counter_charge */
2033 put_cpu_var(memcg_stock
);
2038 * Returns stocks cached in percpu to res_counter and reset cached information.
2040 static void drain_stock(struct memcg_stock_pcp
*stock
)
2042 struct mem_cgroup
*old
= stock
->cached
;
2044 if (stock
->nr_pages
) {
2045 unsigned long bytes
= stock
->nr_pages
* PAGE_SIZE
;
2047 res_counter_uncharge(&old
->res
, bytes
);
2048 if (do_swap_account
)
2049 res_counter_uncharge(&old
->memsw
, bytes
);
2050 stock
->nr_pages
= 0;
2052 stock
->cached
= NULL
;
2056 * This must be called under preempt disabled or must be called by
2057 * a thread which is pinned to local cpu.
2059 static void drain_local_stock(struct work_struct
*dummy
)
2061 struct memcg_stock_pcp
*stock
= &__get_cpu_var(memcg_stock
);
2063 clear_bit(FLUSHING_CACHED_CHARGE
, &stock
->flags
);
2067 * Cache charges(val) which is from res_counter, to local per_cpu area.
2068 * This will be consumed by consume_stock() function, later.
2070 static void refill_stock(struct mem_cgroup
*memcg
, unsigned int nr_pages
)
2072 struct memcg_stock_pcp
*stock
= &get_cpu_var(memcg_stock
);
2074 if (stock
->cached
!= memcg
) { /* reset if necessary */
2076 stock
->cached
= memcg
;
2078 stock
->nr_pages
+= nr_pages
;
2079 put_cpu_var(memcg_stock
);
2083 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2084 * of the hierarchy under it. sync flag says whether we should block
2085 * until the work is done.
2087 static void drain_all_stock(struct mem_cgroup
*root_memcg
, bool sync
)
2091 /* Notify other cpus that system-wide "drain" is running */
2094 for_each_online_cpu(cpu
) {
2095 struct memcg_stock_pcp
*stock
= &per_cpu(memcg_stock
, cpu
);
2096 struct mem_cgroup
*memcg
;
2098 memcg
= stock
->cached
;
2099 if (!memcg
|| !stock
->nr_pages
)
2101 if (!mem_cgroup_same_or_subtree(root_memcg
, memcg
))
2103 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE
, &stock
->flags
)) {
2105 drain_local_stock(&stock
->work
);
2107 schedule_work_on(cpu
, &stock
->work
);
2115 for_each_online_cpu(cpu
) {
2116 struct memcg_stock_pcp
*stock
= &per_cpu(memcg_stock
, cpu
);
2117 if (test_bit(FLUSHING_CACHED_CHARGE
, &stock
->flags
))
2118 flush_work(&stock
->work
);
 * Tries to drain stocked charges on other cpus. This function is asynchronous
 * and just puts a work per cpu for draining locally on each cpu. The caller
 * can expect some charges to go back to the res_counter later but cannot
 * wait for it.
2130 static void drain_all_stock_async(struct mem_cgroup
*root_memcg
)
2133 * If someone calls draining, avoid adding more kworker runs.
2135 if (!mutex_trylock(&percpu_charge_mutex
))
2137 drain_all_stock(root_memcg
, false);
2138 mutex_unlock(&percpu_charge_mutex
);
2141 /* This is a synchronous drain interface. */
2142 static void drain_all_stock_sync(struct mem_cgroup
*root_memcg
)
2144 /* called when force_empty is called */
2145 mutex_lock(&percpu_charge_mutex
);
2146 drain_all_stock(root_memcg
, true);
2147 mutex_unlock(&percpu_charge_mutex
);
2151 * This function drains percpu counter value from DEAD cpu and
2152 * move it to local cpu. Note that this function can be preempted.
2154 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup
*memcg
, int cpu
)
2158 spin_lock(&memcg
->pcp_counter_lock
);
2159 for (i
= 0; i
< MEM_CGROUP_STAT_DATA
; i
++) {
2160 long x
= per_cpu(memcg
->stat
->count
[i
], cpu
);
2162 per_cpu(memcg
->stat
->count
[i
], cpu
) = 0;
2163 memcg
->nocpu_base
.count
[i
] += x
;
2165 for (i
= 0; i
< MEM_CGROUP_EVENTS_NSTATS
; i
++) {
2166 unsigned long x
= per_cpu(memcg
->stat
->events
[i
], cpu
);
2168 per_cpu(memcg
->stat
->events
[i
], cpu
) = 0;
2169 memcg
->nocpu_base
.events
[i
] += x
;
2171 /* need to clear ON_MOVE value, works as a kind of lock. */
2172 per_cpu(memcg
->stat
->count
[MEM_CGROUP_ON_MOVE
], cpu
) = 0;
2173 spin_unlock(&memcg
->pcp_counter_lock
);
static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
{
	int idx = MEM_CGROUP_ON_MOVE;

	spin_lock(&memcg->pcp_counter_lock);
	per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
}
static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *iter;

	if (action == CPU_ONLINE) {
		for_each_mem_cgroup(iter)
			synchronize_mem_cgroup_on_move(iter, cpu);
		return NOTIFY_OK;
	}

	if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN))
		return NOTIFY_OK;

	for_each_mem_cgroup(iter)
		mem_cgroup_drain_pcp_counter(iter, cpu);

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}
/* See __mem_cgroup_try_charge() for details */
enum {
	CHARGE_OK,		/* success */
	CHARGE_RETRY,		/* need to retry but retry is not bad */
	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK,	/* GFP_WAIT wasn't set and not enough res. */
	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
};
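
/*
 * Illustrative summary of how __mem_cgroup_try_charge() consumes these
 * results (see the switch statement below): CHARGE_OK ends the retry loop,
 * CHARGE_RETRY drops back to a single-page charge and retries,
 * CHARGE_WOULDBLOCK and CHARGE_NOMEM give up with -ENOMEM (unless the OOM
 * killer may run), and CHARGE_OOM_DIE bypasses the charge because the
 * current task has been killed.
 */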
static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
				unsigned int nr_pages, bool oom_check)
{
	unsigned long csize = nr_pages * PAGE_SIZE;
	struct mem_cgroup *mem_over_limit;
	struct res_counter *fail_res;
	unsigned long flags = 0;
	int ret;

	ret = res_counter_charge(&memcg->res, csize, &fail_res);

	if (likely(!ret)) {
		if (!do_swap_account)
			return CHARGE_OK;
		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
		if (likely(!ret))
			return CHARGE_OK;

		res_counter_uncharge(&memcg->res, csize);
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
	} else
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
	/*
	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
	 *
	 * Never reclaim on behalf of optional batching, retry with a
	 * single page instead.
	 */
	if (nr_pages == CHARGE_BATCH)
		return CHARGE_RETRY;

	if (!(gfp_mask & __GFP_WAIT))
		return CHARGE_WOULDBLOCK;

	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		return CHARGE_RETRY;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages. Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_pages == 1 && ret)
		return CHARGE_RETRY;

	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return CHARGE_RETRY;

	/* If we don't need to call the oom-killer at all, return immediately */
	if (!oom_check)
		return CHARGE_NOMEM;
	/* check OOM */
	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
		return CHARGE_OOM_DIE;

	return CHARGE_RETRY;
}
/*
 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
 * the oom-killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
				   gfp_t gfp_mask,
				   unsigned int nr_pages,
				   struct mem_cgroup **ptr,
				   bool oom)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *memcg = NULL;
	int ret;

	/*
	 * Unlike the global VM's OOM-kill, we're not in a memory shortage
	 * at the system level. So, allow a dying process to go ahead, in
	 * addition to a process killed by the memcg OOM killer.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)
		     || fatal_signal_pending(current)))
		goto bypass;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!*ptr && !mm)
		goto bypass;
again:
	if (*ptr) { /* css should be a valid one */
		memcg = *ptr;
		VM_BUG_ON(css_is_removed(&memcg->css));
		if (mem_cgroup_is_root(memcg))
			goto done;
		if (nr_pages == 1 && consume_stock(memcg))
			goto done;
		css_get(&memcg->css);
	} else {
		struct task_struct *p;

		rcu_read_lock();
		p = rcu_dereference(mm->owner);
		/*
		 * Because we don't have task_lock(), "p" can exit.
		 * In that case, "memcg" can point to root or p can be NULL
		 * due to a race with swapoff. Then, we have a small risk of
		 * mis-accounting. But such mis-accounting by race always
		 * happens because we don't have cgroup_mutex(). Taking it
		 * here would be overkill, so we allow that small race.
		 * (*) swapoff, for instance, will charge against the
		 * mm-struct, not against the task-struct, so mm->owner can
		 * be NULL.
		 */
		memcg = mem_cgroup_from_task(p);
		if (!memcg || mem_cgroup_is_root(memcg)) {
			rcu_read_unlock();
			goto done;
		}
		if (nr_pages == 1 && consume_stock(memcg)) {
			/*
			 * It seems dangerous to access memcg without
			 * css_get(). But considering how consume_stock
			 * works, it's not necessary. If consume_stock
			 * succeeds, some charges from this memcg are cached
			 * on this cpu. So, we don't need to call
			 * css_get()/css_tryget() before calling
			 * consume_stock().
			 */
			rcu_read_unlock();
			goto done;
		}
		/* after here, we may be blocked. we need to get refcnt */
		if (!css_tryget(&memcg->css)) {
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();
	}

	do {
		bool oom_check;

		/* If killed, bypass charge */
		if (fatal_signal_pending(current)) {
			css_put(&memcg->css);
			goto bypass;
		}

		oom_check = false;
		if (oom && !nr_oom_retries) {
			oom_check = true;
			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
		}

		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
			batch = nr_pages;
			css_put(&memcg->css);
			memcg = NULL;
			goto again;
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
			css_put(&memcg->css);
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
			if (!oom) {
				css_put(&memcg->css);
				goto nomem;
			}
			/* If oom, we never return -ENOMEM */
			nr_oom_retries--;
			break;
		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
			css_put(&memcg->css);
			goto bypass;
		}
	} while (ret != CHARGE_OK);

	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
	css_put(&memcg->css);
done:
	*ptr = memcg;
	return 0;
nomem:
	*ptr = NULL;
	return -ENOMEM;
bypass:
	*ptr = NULL;
	return 0;
}
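
/*
 * Charging protocol sketch (matching how the callers below use this
 * function): a caller first calls __mem_cgroup_try_charge() to reserve
 * nr_pages against a memcg, then either commits the charge to a specific
 * page with __mem_cgroup_commit_charge() or gives the charge back with
 * __mem_cgroup_cancel_charge() if the page cannot be used after all. The
 * "swapin" variants below wrap the same three steps for the swap-in path.
 */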
/*
 * Sometimes we have to undo a charge we got by try_charge().
 * This function is for that: it does the uncharge for the charge and the
 * css refcnt gotten by try_charge().
 */
static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
				       unsigned int nr_pages)
{
	if (!mem_cgroup_is_root(memcg)) {
		unsigned long bytes = nr_pages * PAGE_SIZE;

		res_counter_uncharge(&memcg->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&memcg->memsw, bytes);
	}
}
/*
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller must check css_is_removed() or similar if it
 * is a concern. (dropping a refcnt from swap can be called against a removed
 * cgroup.)
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	struct cgroup_subsys_state *css;

	/* ID 0 is unused ID */
	if (!id)
		return NULL;
	css = css_lookup(&mem_cgroup_subsys, id);
	if (!css)
		return NULL;
	return container_of(css, struct mem_cgroup, css);
}
struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;
	unsigned short id;
	swp_entry_t ent;

	VM_BUG_ON(!PageLocked(page));

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
	} else if (PageSwapCache(page)) {
		ent.val = page_private(page);
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_lookup(id);
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}
	unlock_page_cgroup(pc);
	return memcg;
}
static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
				       struct page *page,
				       unsigned int nr_pages,
				       struct page_cgroup *pc,
				       enum charge_type ctype)
{
	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		__mem_cgroup_cancel_charge(memcg, nr_pages);
		return;
	}
	/*
	 * we don't need page_cgroup_lock for tail pages, because they are
	 * not accessed by any other context at this point.
	 */
	pc->mem_cgroup = memcg;
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing the USED bit. To make pc->mem_cgroup
	 * visible before the USED bit, we need a memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
	 */
	smp_wmb();
	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_CACHE:
	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
		SetPageCgroupCache(pc);
		SetPageCgroupUsed(pc);
		break;
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
		ClearPageCgroupCache(pc);
		SetPageCgroupUsed(pc);
		break;
	default:
		break;
	}

	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
	unlock_page_cgroup(pc);
	/*
	 * "charge_statistics" updated the event counter. Then, check it.
	 * Insert the ancestor (and the ancestor's ancestors) into the
	 * softlimit RB-tree if they exceed their softlimit.
	 */
	memcg_check_events(memcg, page);
}
2537 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2539 #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2540 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2542 * Because tail pages are not marked as "used", set it. We're under
2543 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2544 * charge/uncharge will be never happen and move_account() is done under
2545 * compound_lock(), so we don't have to take care of races.
2547 void mem_cgroup_split_huge_fixup(struct page
*head
)
2549 struct page_cgroup
*head_pc
= lookup_page_cgroup(head
);
2550 struct page_cgroup
*pc
;
2553 if (mem_cgroup_disabled())
2555 for (i
= 1; i
< HPAGE_PMD_NR
; i
++) {
2557 pc
->mem_cgroup
= head_pc
->mem_cgroup
;
2558 smp_wmb();/* see __commit_charge() */
2560 * LRU flags cannot be copied because we need to add tail
2561 * page to LRU by generic call and our hooks will be called.
2563 pc
->flags
= head_pc
->flags
& ~PCGF_NOCOPY_AT_SPLIT
;
2566 if (PageCgroupAcctLRU(head_pc
)) {
2568 struct mem_cgroup_per_zone
*mz
;
2570 * We hold lru_lock, then, reduce counter directly.
2572 lru
= page_lru(head
);
2573 mz
= page_cgroup_zoneinfo(head_pc
->mem_cgroup
, head
);
2574 MEM_CGROUP_ZSTAT(mz
, lru
) -= HPAGE_PMD_NR
- 1;
2580 * mem_cgroup_move_account - move account of the page
2582 * @nr_pages: number of regular pages (>1 for huge pages)
2583 * @pc: page_cgroup of the page.
2584 * @from: mem_cgroup which the page is moved from.
2585 * @to: mem_cgroup which the page is moved to. @from != @to.
2586 * @uncharge: whether we should call uncharge and css_put against @from.
2588 * The caller must confirm following.
2589 * - page is not on LRU (isolate_page() is useful.)
2590 * - compound_lock is held when nr_pages > 1
2592 * This function doesn't do "charge" nor css_get to new cgroup. It should be
2593 * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
2594 * true, this function does "uncharge" from old cgroup, but it doesn't if
2595 * @uncharge is false, so a caller should do "uncharge".
2597 static int mem_cgroup_move_account(struct page
*page
,
2598 unsigned int nr_pages
,
2599 struct page_cgroup
*pc
,
2600 struct mem_cgroup
*from
,
2601 struct mem_cgroup
*to
,
2604 unsigned long flags
;
2607 VM_BUG_ON(from
== to
);
2608 VM_BUG_ON(PageLRU(page
));
2610 * The page is isolated from LRU. So, collapse function
2611 * will not handle this page. But page splitting can happen.
2612 * Do this check under compound_page_lock(). The caller should
2616 if (nr_pages
> 1 && !PageTransHuge(page
))
2619 lock_page_cgroup(pc
);
2622 if (!PageCgroupUsed(pc
) || pc
->mem_cgroup
!= from
)
2625 move_lock_page_cgroup(pc
, &flags
);
2627 if (PageCgroupFileMapped(pc
)) {
2628 /* Update mapped_file data for mem_cgroup */
2630 __this_cpu_dec(from
->stat
->count
[MEM_CGROUP_STAT_FILE_MAPPED
]);
2631 __this_cpu_inc(to
->stat
->count
[MEM_CGROUP_STAT_FILE_MAPPED
]);
2634 mem_cgroup_charge_statistics(from
, PageCgroupCache(pc
), -nr_pages
);
2636 /* This is not "cancel", but cancel_charge does all we need. */
2637 __mem_cgroup_cancel_charge(from
, nr_pages
);
2639 /* caller should have done css_get */
2640 pc
->mem_cgroup
= to
;
2641 mem_cgroup_charge_statistics(to
, PageCgroupCache(pc
), nr_pages
);
	 * We charge against "to", which may not have any tasks. Then "to"
	 * can be under rmdir(). But in the current implementation, the
	 * callers of this function are just force_empty() and move charge,
	 * so it's guaranteed that "to" is never removed. So, we don't check
	 * the rmdir status here.
2649 move_unlock_page_cgroup(pc
, &flags
);
2652 unlock_page_cgroup(pc
);
2656 memcg_check_events(to
, page
);
2657 memcg_check_events(from
, page
);
2663 * move charges to its parent.
2666 static int mem_cgroup_move_parent(struct page
*page
,
2667 struct page_cgroup
*pc
,
2668 struct mem_cgroup
*child
,
2671 struct cgroup
*cg
= child
->css
.cgroup
;
2672 struct cgroup
*pcg
= cg
->parent
;
2673 struct mem_cgroup
*parent
;
2674 unsigned int nr_pages
;
2675 unsigned long uninitialized_var(flags
);
2683 if (!get_page_unless_zero(page
))
2685 if (isolate_lru_page(page
))
2688 nr_pages
= hpage_nr_pages(page
);
2690 parent
= mem_cgroup_from_cont(pcg
);
2691 ret
= __mem_cgroup_try_charge(NULL
, gfp_mask
, nr_pages
, &parent
, false);
2696 flags
= compound_lock_irqsave(page
);
2698 ret
= mem_cgroup_move_account(page
, nr_pages
, pc
, child
, parent
, true);
2700 __mem_cgroup_cancel_charge(parent
, nr_pages
);
2703 compound_unlock_irqrestore(page
, flags
);
2705 putback_lru_page(page
);
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
	bool oom = true;
	int ret;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
		/*
		 * Never OOM-kill a process for a huge page. The
		 * fault handler will fall back to regular pages.
		 */
		oom = false;
	}

	pc = lookup_page_cgroup(page);
	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
	if (ret || !memcg)
		return ret;

	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
	return 0;
}
int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping && !PageAnon(page));
	VM_BUG_ON(!mm);
	return mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_MAPPED);
}
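
/*
 * Usage sketch (illustrative; the actual call sites live elsewhere in mm/):
 * the anonymous fault path charges a newly allocated page roughly as
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		goto oom;
 *
 * before mapping it, and calls mem_cgroup_uncharge_page() if the mapping
 * step fails afterwards.
 */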
2759 __mem_cgroup_commit_charge_swapin(struct page
*page
, struct mem_cgroup
*ptr
,
2760 enum charge_type ctype
);
2763 __mem_cgroup_commit_charge_lrucare(struct page
*page
, struct mem_cgroup
*memcg
,
2764 enum charge_type ctype
)
2766 struct page_cgroup
*pc
= lookup_page_cgroup(page
);
	 * In some cases (SwapCache, FUSE(splice_buf->radixtree)), the page
	 * is already on the LRU. It means the page may be on some other
	 * page_cgroup's LRU. Take care of it.
2772 mem_cgroup_lru_del_before_commit(page
);
2773 __mem_cgroup_commit_charge(memcg
, page
, 1, pc
, ctype
);
2774 mem_cgroup_lru_add_after_commit(page
);
2778 int mem_cgroup_cache_charge(struct page
*page
, struct mm_struct
*mm
,
2781 struct mem_cgroup
*memcg
= NULL
;
2784 if (mem_cgroup_disabled())
2786 if (PageCompound(page
))
2792 if (page_is_file_cache(page
)) {
2793 ret
= __mem_cgroup_try_charge(mm
, gfp_mask
, 1, &memcg
, true);
2798 * FUSE reuses pages without going through the final
2799 * put that would remove them from the LRU list, make
2800 * sure that they get relinked properly.
2802 __mem_cgroup_commit_charge_lrucare(page
, memcg
,
2803 MEM_CGROUP_CHARGE_TYPE_CACHE
);
2807 if (PageSwapCache(page
)) {
2808 ret
= mem_cgroup_try_charge_swapin(mm
, page
, gfp_mask
, &memcg
);
2810 __mem_cgroup_commit_charge_swapin(page
, memcg
,
2811 MEM_CGROUP_CHARGE_TYPE_SHMEM
);
2813 ret
= mem_cgroup_charge_common(page
, mm
, gfp_mask
,
2814 MEM_CGROUP_CHARGE_TYPE_SHMEM
);
2820 * While swap-in, try_charge -> commit or cancel, the page is locked.
2821 * And when try_charge() successfully returns, one refcnt to memcg without
2822 * struct page_cgroup is acquired. This refcnt will be consumed by
2823 * "commit()" or removed by "cancel()"
2825 int mem_cgroup_try_charge_swapin(struct mm_struct
*mm
,
2827 gfp_t mask
, struct mem_cgroup
**memcgp
)
2829 struct mem_cgroup
*memcg
;
2834 if (mem_cgroup_disabled())
2837 if (!do_swap_account
)
2840 * A racing thread's fault, or swapoff, may have already updated
2841 * the pte, and even removed page from swap cache: in those cases
2842 * do_swap_page()'s pte_same() test will fail; but there's also a
2843 * KSM case which does need to charge the page.
2845 if (!PageSwapCache(page
))
2847 memcg
= try_get_mem_cgroup_from_page(page
);
2851 ret
= __mem_cgroup_try_charge(NULL
, mask
, 1, memcgp
, true);
2852 css_put(&memcg
->css
);
2857 return __mem_cgroup_try_charge(mm
, mask
, 1, memcgp
, true);
2861 __mem_cgroup_commit_charge_swapin(struct page
*page
, struct mem_cgroup
*memcg
,
2862 enum charge_type ctype
)
2864 if (mem_cgroup_disabled())
2868 cgroup_exclude_rmdir(&memcg
->css
);
2870 __mem_cgroup_commit_charge_lrucare(page
, memcg
, ctype
);
2872 * Now swap is on-memory. This means this page may be
2873 * counted both as mem and swap....double count.
2874 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2875 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2876 * may call delete_from_swap_cache() before reach here.
2878 if (do_swap_account
&& PageSwapCache(page
)) {
2879 swp_entry_t ent
= {.val
= page_private(page
)};
2880 struct mem_cgroup
*swap_memcg
;
2883 id
= swap_cgroup_record(ent
, 0);
2885 swap_memcg
= mem_cgroup_lookup(id
);
2888 * This recorded memcg can be obsolete one. So, avoid
2889 * calling css_tryget
2891 if (!mem_cgroup_is_root(swap_memcg
))
2892 res_counter_uncharge(&swap_memcg
->memsw
,
2894 mem_cgroup_swap_statistics(swap_memcg
, false);
2895 mem_cgroup_put(swap_memcg
);
2900 * At swapin, we may charge account against cgroup which has no tasks.
2901 * So, rmdir()->pre_destroy() can be called while we do this charge.
2902 * In that case, we need to call pre_destroy() again. check it here.
2904 cgroup_release_and_wakeup_rmdir(&memcg
->css
);
2907 void mem_cgroup_commit_charge_swapin(struct page
*page
,
2908 struct mem_cgroup
*memcg
)
2910 __mem_cgroup_commit_charge_swapin(page
, memcg
,
2911 MEM_CGROUP_CHARGE_TYPE_MAPPED
);
void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;
	if (!memcg)
		return;
	__mem_cgroup_cancel_charge(memcg, 1);
}
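
/*
 * Swap-in charging protocol sketch (matching the three functions above,
 * illustrative only): do_swap_page() first calls
 * mem_cgroup_try_charge_swapin(mm, page, mask, &memcg); once the pte has
 * been installed it calls mem_cgroup_commit_charge_swapin(page, memcg), and
 * on any failure path it calls mem_cgroup_cancel_charge_swapin(memcg) so
 * that the reserved charge and the css reference are released.
 */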
2923 static void mem_cgroup_do_uncharge(struct mem_cgroup
*memcg
,
2924 unsigned int nr_pages
,
2925 const enum charge_type ctype
)
2927 struct memcg_batch_info
*batch
= NULL
;
2928 bool uncharge_memsw
= true;
2930 /* If swapout, usage of swap doesn't decrease */
2931 if (!do_swap_account
|| ctype
== MEM_CGROUP_CHARGE_TYPE_SWAPOUT
)
2932 uncharge_memsw
= false;
	batch = &current->memcg_batch;
	 * Usually, we do css_get() when we remember a memcg pointer.
	 * But in this case, we keep res->usage until the end of a series of
	 * uncharges. Then, it's ok to ignore the memcg's refcnt.
2941 batch
->memcg
= memcg
;
2943 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2944 * In those cases, all pages freed continuously can be expected to be in
2945 * the same cgroup and we have chance to coalesce uncharges.
2946 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
2947 * because we want to do uncharge as soon as possible.
2950 if (!batch
->do_batch
|| test_thread_flag(TIF_MEMDIE
))
2951 goto direct_uncharge
;
2954 goto direct_uncharge
;
	 * In the typical case, batch->memcg == memcg. This means we can
	 * merge a series of uncharges into one uncharge of the res_counter.
	 * If not, we uncharge the res_counter one by one.
2961 if (batch
->memcg
!= memcg
)
2962 goto direct_uncharge
;
2963 /* remember freed charge and uncharge it later */
2966 batch
->memsw_nr_pages
++;
2969 res_counter_uncharge(&memcg
->res
, nr_pages
* PAGE_SIZE
);
2971 res_counter_uncharge(&memcg
->memsw
, nr_pages
* PAGE_SIZE
);
2972 if (unlikely(batch
->memcg
!= memcg
))
2973 memcg_oom_recover(memcg
);
2978 * uncharge if !page_mapped(page)
2980 static struct mem_cgroup
*
2981 __mem_cgroup_uncharge_common(struct page
*page
, enum charge_type ctype
)
2983 struct mem_cgroup
*memcg
= NULL
;
2984 unsigned int nr_pages
= 1;
2985 struct page_cgroup
*pc
;
2987 if (mem_cgroup_disabled())
2990 if (PageSwapCache(page
))
2993 if (PageTransHuge(page
)) {
2994 nr_pages
<<= compound_order(page
);
2995 VM_BUG_ON(!PageTransHuge(page
));
2998 * Check if our page_cgroup is valid
3000 pc
= lookup_page_cgroup(page
);
3001 if (unlikely(!PageCgroupUsed(pc
)))
3004 lock_page_cgroup(pc
);
3006 memcg
= pc
->mem_cgroup
;
3008 if (!PageCgroupUsed(pc
))
3012 case MEM_CGROUP_CHARGE_TYPE_MAPPED
:
3013 case MEM_CGROUP_CHARGE_TYPE_DROP
:
3014 /* See mem_cgroup_prepare_migration() */
3015 if (page_mapped(page
) || PageCgroupMigration(pc
))
3018 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT
:
3019 if (!PageAnon(page
)) { /* Shared memory */
3020 if (page
->mapping
&& !page_is_file_cache(page
))
3022 } else if (page_mapped(page
)) /* Anon */
3029 mem_cgroup_charge_statistics(memcg
, PageCgroupCache(pc
), -nr_pages
);
3031 ClearPageCgroupUsed(pc
);
3033 * pc->mem_cgroup is not cleared here. It will be accessed when it's
3034 * freed from LRU. This is safe because uncharged page is expected not
3035 * to be reused (freed soon). Exception is SwapCache, it's handled by
3036 * special functions.
3039 unlock_page_cgroup(pc
);
3041 * even after unlock, we have memcg->res.usage here and this memcg
3042 * will never be freed.
3044 memcg_check_events(memcg
, page
);
3045 if (do_swap_account
&& ctype
== MEM_CGROUP_CHARGE_TYPE_SWAPOUT
) {
3046 mem_cgroup_swap_statistics(memcg
, true);
3047 mem_cgroup_get(memcg
);
3049 if (!mem_cgroup_is_root(memcg
))
3050 mem_cgroup_do_uncharge(memcg
, nr_pages
, ctype
);
3055 unlock_page_cgroup(pc
);
void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	VM_BUG_ON(page->mapping && !PageAnon(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect they are
 * in the same memcg. The callers themselves limit the number of pages freed
 * at once, and then uncharge_start/end() is called properly.
 * This may be called multiple times in one context, so nesting is allowed.
 */
void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	/* We can do nest. */
	if (current->memcg_batch.do_batch == 1) {
		current->memcg_batch.memcg = NULL;
		current->memcg_batch.nr_pages = 0;
		current->memcg_batch.memsw_nr_pages = 0;
	}
}
void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (!batch->do_batch)
		return;

	batch->do_batch--;
	if (batch->do_batch) /* If stacked, do nothing. */
		return;

	if (!batch->memcg)
		return;
	/*
	 * This "batch->memcg" is valid without any css_get/put etc...
	 * because we hide the charges behind us.
	 */
	if (batch->nr_pages)
		res_counter_uncharge(&batch->memcg->res,
				     batch->nr_pages * PAGE_SIZE);
	if (batch->memsw_nr_pages)
		res_counter_uncharge(&batch->memcg->memsw,
				     batch->memsw_nr_pages * PAGE_SIZE);
	memcg_oom_recover(batch->memcg);
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}
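
/*
 * Batched uncharge usage sketch (illustrative; the real callers are the
 * truncate/unmap paths named in the comment above):
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being released:
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();
 *
 * The individual uncharges then only accumulate counts in
 * current->memcg_batch, and the res_counter is touched once at
 * mem_cgroup_uncharge_end().
 */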
3124 * called after __delete_from_swap_cache() and drop "page" account.
3125 * memcg information is recorded to swap_cgroup of "ent"
3128 mem_cgroup_uncharge_swapcache(struct page
*page
, swp_entry_t ent
, bool swapout
)
3130 struct mem_cgroup
*memcg
;
3131 int ctype
= MEM_CGROUP_CHARGE_TYPE_SWAPOUT
;
3133 if (!swapout
) /* this was a swap cache but the swap is unused ! */
3134 ctype
= MEM_CGROUP_CHARGE_TYPE_DROP
;
3136 memcg
= __mem_cgroup_uncharge_common(page
, ctype
);
3139 * record memcg information, if swapout && memcg != NULL,
3140 * mem_cgroup_get() was called in uncharge().
3142 if (do_swap_account
&& swapout
&& memcg
)
3143 swap_cgroup_record(ent
, css_id(&memcg
->css
));
3147 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3149 * called from swap_entry_free(). remove record in swap_cgroup and
3150 * uncharge "memsw" account.
3152 void mem_cgroup_uncharge_swap(swp_entry_t ent
)
3154 struct mem_cgroup
*memcg
;
3157 if (!do_swap_account
)
3160 id
= swap_cgroup_record(ent
, 0);
3162 memcg
= mem_cgroup_lookup(id
);
3165 * We uncharge this because swap is freed.
3166 * This memcg can be obsolete one. We avoid calling css_tryget
3168 if (!mem_cgroup_is_root(memcg
))
3169 res_counter_uncharge(&memcg
->memsw
, PAGE_SIZE
);
3170 mem_cgroup_swap_statistics(memcg
, false);
3171 mem_cgroup_put(memcg
);
3177 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3178 * @entry: swap entry to be moved
3179 * @from: mem_cgroup which the entry is moved from
3180 * @to: mem_cgroup which the entry is moved to
3181 * @need_fixup: whether we should fixup res_counters and refcounts.
3183 * It succeeds only when the swap_cgroup's record for this entry is the same
3184 * as the mem_cgroup's id of @from.
3186 * Returns 0 on success, -EINVAL on failure.
3188 * The caller must have charged to @to, IOW, called res_counter_charge() about
3189 * both res and memsw, and called css_get().
3191 static int mem_cgroup_move_swap_account(swp_entry_t entry
,
3192 struct mem_cgroup
*from
, struct mem_cgroup
*to
, bool need_fixup
)
3194 unsigned short old_id
, new_id
;
3196 old_id
= css_id(&from
->css
);
3197 new_id
= css_id(&to
->css
);
3199 if (swap_cgroup_cmpxchg(entry
, old_id
, new_id
) == old_id
) {
3200 mem_cgroup_swap_statistics(from
, false);
3201 mem_cgroup_swap_statistics(to
, true);
3203 * This function is only called from task migration context now.
3204 * It postpones res_counter and refcount handling till the end
3205 * of task migration(mem_cgroup_clear_mc()) for performance
3206 * improvement. But we cannot postpone mem_cgroup_get(to)
3207 * because if the process that has been moved to @to does
3208 * swap-in, the refcount of @to might be decreased to 0.
3212 if (!mem_cgroup_is_root(from
))
3213 res_counter_uncharge(&from
->memsw
, PAGE_SIZE
);
3214 mem_cgroup_put(from
);
3216 * we charged both to->res and to->memsw, so we should
3219 if (!mem_cgroup_is_root(to
))
3220 res_counter_uncharge(&to
->res
, PAGE_SIZE
);
3227 static inline int mem_cgroup_move_swap_account(swp_entry_t entry
,
3228 struct mem_cgroup
*from
, struct mem_cgroup
*to
, bool need_fixup
)
3235 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3238 int mem_cgroup_prepare_migration(struct page
*page
,
3239 struct page
*newpage
, struct mem_cgroup
**memcgp
, gfp_t gfp_mask
)
3241 struct mem_cgroup
*memcg
= NULL
;
3242 struct page_cgroup
*pc
;
3243 enum charge_type ctype
;
3248 VM_BUG_ON(PageTransHuge(page
));
3249 if (mem_cgroup_disabled())
3252 pc
= lookup_page_cgroup(page
);
3253 lock_page_cgroup(pc
);
3254 if (PageCgroupUsed(pc
)) {
3255 memcg
= pc
->mem_cgroup
;
3256 css_get(&memcg
->css
);
		 * When migrating an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and this page would have to be
		 * charged again. We set the MIGRATION flag here and delay
		 * uncharge until end_migration() is called.
		 *
		 * Corner Case Thinking
		 * A)
		 * When the old page was mapped as Anon and it's unmapped and
		 * freed while migration was ongoing:
		 * If unmap finds the old page, its uncharge() will be delayed
		 * until end_migration(). If unmap finds a new page, it's
		 * uncharged when its mapcount drops from 1 to 0. If unmap
		 * finds a swap migration entry, the new page will not be
		 * mapped and end_migration() will find it (mapcount == 0).
		 *
		 * B)
		 * When the old page was mapped but migration fails, the
		 * kernel remaps it. A charge for it is kept by the MIGRATION
		 * flag even if the mapcount goes down to 0. We can remap
		 * successfully without charging it again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so the old page itself will not be swapped out.
		 * If the new page is swapped out before end_migration, our
		 * hook into the usual swap-out path will catch the event.
3287 SetPageCgroupMigration(pc
);
3289 unlock_page_cgroup(pc
);
3291 * If the page is not charged at this point,
3298 ret
= __mem_cgroup_try_charge(NULL
, gfp_mask
, 1, memcgp
, false);
3299 css_put(&memcg
->css
);/* drop extra refcnt */
3300 if (ret
|| *memcgp
== NULL
) {
3301 if (PageAnon(page
)) {
3302 lock_page_cgroup(pc
);
3303 ClearPageCgroupMigration(pc
);
3304 unlock_page_cgroup(pc
);
3306 * The old page may be fully unmapped while we kept it.
3308 mem_cgroup_uncharge_page(page
);
3313 * We charge new page before it's used/mapped. So, even if unlock_page()
3314 * is called before end_migration, we can catch all events on this new
3315 * page. In the case new page is migrated but not remapped, new page's
3316 * mapcount will be finally 0 and we call uncharge in end_migration().
3318 pc
= lookup_page_cgroup(newpage
);
3320 ctype
= MEM_CGROUP_CHARGE_TYPE_MAPPED
;
3321 else if (page_is_file_cache(page
))
3322 ctype
= MEM_CGROUP_CHARGE_TYPE_CACHE
;
3324 ctype
= MEM_CGROUP_CHARGE_TYPE_SHMEM
;
3325 __mem_cgroup_commit_charge(memcg
, page
, 1, pc
, ctype
);
3329 /* remove redundant charge if migration failed*/
3330 void mem_cgroup_end_migration(struct mem_cgroup
*memcg
,
3331 struct page
*oldpage
, struct page
*newpage
, bool migration_ok
)
3333 struct page
*used
, *unused
;
3334 struct page_cgroup
*pc
;
3338 /* blocks rmdir() */
3339 cgroup_exclude_rmdir(&memcg
->css
);
3340 if (!migration_ok
) {
	 * We disallowed uncharging pages under migration because the
	 * mapcount of the page goes down to zero temporarily.
	 * Clear the flag and check whether the page should be charged.
3352 pc
= lookup_page_cgroup(oldpage
);
3353 lock_page_cgroup(pc
);
3354 ClearPageCgroupMigration(pc
);
3355 unlock_page_cgroup(pc
);
3357 __mem_cgroup_uncharge_common(unused
, MEM_CGROUP_CHARGE_TYPE_FORCE
);
	 * If a page is file cache, radix-tree replacement is very atomic
	 * and we can skip this check. When it was an Anon page, its mapcount
	 * goes down to 0. But because we added the MIGRATION flag, it's not
	 * uncharged yet. There are several cases, but the page->mapcount
	 * check and the USED bit check in mem_cgroup_uncharge_page() do
	 * enough checking. (see prepare_charge() also)
3368 mem_cgroup_uncharge_page(used
);
3370 * At migration, we may charge account against cgroup which has no
3372 * So, rmdir()->pre_destroy() can be called while we do this charge.
3373 * In that case, we need to call pre_destroy() again. check it here.
3375 cgroup_release_and_wakeup_rmdir(&memcg
->css
);
3379 * At replace page cache, newpage is not under any memcg but it's on
3380 * LRU. So, this function doesn't touch res_counter but handles LRU
3381 * in correct way. Both pages are locked so we cannot race with uncharge.
3383 void mem_cgroup_replace_page_cache(struct page
*oldpage
,
3384 struct page
*newpage
)
3386 struct mem_cgroup
*memcg
;
3387 struct page_cgroup
*pc
;
3389 enum charge_type type
= MEM_CGROUP_CHARGE_TYPE_CACHE
;
3390 unsigned long flags
;
3392 if (mem_cgroup_disabled())
3395 pc
= lookup_page_cgroup(oldpage
);
3396 /* fix accounting on old pages */
3397 lock_page_cgroup(pc
);
3398 memcg
= pc
->mem_cgroup
;
3399 mem_cgroup_charge_statistics(memcg
, PageCgroupCache(pc
), -1);
3400 ClearPageCgroupUsed(pc
);
3401 unlock_page_cgroup(pc
);
3403 if (PageSwapBacked(oldpage
))
3404 type
= MEM_CGROUP_CHARGE_TYPE_SHMEM
;
3406 zone
= page_zone(newpage
);
3407 pc
= lookup_page_cgroup(newpage
);
3409 * Even if newpage->mapping was NULL before starting replacement,
3410 * the newpage may be on LRU(or pagevec for LRU) already. We lock
3411 * LRU while we overwrite pc->mem_cgroup.
3413 spin_lock_irqsave(&zone
->lru_lock
, flags
);
3414 if (PageLRU(newpage
))
3415 del_page_from_lru_list(zone
, newpage
, page_lru(newpage
));
3416 __mem_cgroup_commit_charge(memcg
, newpage
, 1, pc
, type
);
3417 if (PageLRU(newpage
))
3418 add_page_to_lru_list(zone
, newpage
, page_lru(newpage
));
3419 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
3422 #ifdef CONFIG_DEBUG_VM
3423 static struct page_cgroup
*lookup_page_cgroup_used(struct page
*page
)
3425 struct page_cgroup
*pc
;
3427 pc
= lookup_page_cgroup(page
);
3429 * Can be NULL while feeding pages into the page allocator for
3430 * the first time, i.e. during boot or memory hotplug;
3431 * or when mem_cgroup_disabled().
3433 if (likely(pc
) && PageCgroupUsed(pc
))
3438 bool mem_cgroup_bad_page_check(struct page
*page
)
3440 if (mem_cgroup_disabled())
3443 return lookup_page_cgroup_used(page
) != NULL
;
3446 void mem_cgroup_print_bad_page(struct page
*page
)
3448 struct page_cgroup
*pc
;
3450 pc
= lookup_page_cgroup_used(page
);
3455 printk(KERN_ALERT
"pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3456 pc
, pc
->flags
, pc
->mem_cgroup
);
3458 path
= kmalloc(PATH_MAX
, GFP_KERNEL
);
3461 ret
= cgroup_path(pc
->mem_cgroup
->css
.cgroup
,
3466 printk(KERN_CONT
"(%s)\n",
3467 (ret
< 0) ? "cannot get the path" : path
);
3473 static DEFINE_MUTEX(set_limit_mutex
);
3475 static int mem_cgroup_resize_limit(struct mem_cgroup
*memcg
,
3476 unsigned long long val
)
3479 u64 memswlimit
, memlimit
;
3481 int children
= mem_cgroup_count_children(memcg
);
3482 u64 curusage
, oldusage
;
	 * To keep hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry-count to be a function
	 * of the number of children we should visit in this loop.
3490 retry_count
= MEM_CGROUP_RECLAIM_RETRIES
* children
;
3492 oldusage
= res_counter_read_u64(&memcg
->res
, RES_USAGE
);
3495 while (retry_count
) {
3496 if (signal_pending(current
)) {
3501 * Rather than hide all in some function, I do this in
3502 * open coded manner. You see what this really does.
3503 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
3505 mutex_lock(&set_limit_mutex
);
3506 memswlimit
= res_counter_read_u64(&memcg
->memsw
, RES_LIMIT
);
3507 if (memswlimit
< val
) {
3509 mutex_unlock(&set_limit_mutex
);
3513 memlimit
= res_counter_read_u64(&memcg
->res
, RES_LIMIT
);
3517 ret
= res_counter_set_limit(&memcg
->res
, val
);
3519 if (memswlimit
== val
)
3520 memcg
->memsw_is_minimum
= true;
3522 memcg
->memsw_is_minimum
= false;
3524 mutex_unlock(&set_limit_mutex
);
3529 mem_cgroup_reclaim(memcg
, GFP_KERNEL
,
3530 MEM_CGROUP_RECLAIM_SHRINK
);
3531 curusage
= res_counter_read_u64(&memcg
->res
, RES_USAGE
);
3532 /* Usage is reduced ? */
3533 if (curusage
>= oldusage
)
3536 oldusage
= curusage
;
3538 if (!ret
&& enlarge
)
3539 memcg_oom_recover(memcg
);
3544 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup
*memcg
,
3545 unsigned long long val
)
3548 u64 memlimit
, memswlimit
, oldusage
, curusage
;
3549 int children
= mem_cgroup_count_children(memcg
);
3553 /* see mem_cgroup_resize_res_limit */
3554 retry_count
= children
* MEM_CGROUP_RECLAIM_RETRIES
;
3555 oldusage
= res_counter_read_u64(&memcg
->memsw
, RES_USAGE
);
3556 while (retry_count
) {
3557 if (signal_pending(current
)) {
3562 * Rather than hide all in some function, I do this in
3563 * open coded manner. You see what this really does.
3564 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
3566 mutex_lock(&set_limit_mutex
);
3567 memlimit
= res_counter_read_u64(&memcg
->res
, RES_LIMIT
);
3568 if (memlimit
> val
) {
3570 mutex_unlock(&set_limit_mutex
);
3573 memswlimit
= res_counter_read_u64(&memcg
->memsw
, RES_LIMIT
);
3574 if (memswlimit
< val
)
3576 ret
= res_counter_set_limit(&memcg
->memsw
, val
);
3578 if (memlimit
== val
)
3579 memcg
->memsw_is_minimum
= true;
3581 memcg
->memsw_is_minimum
= false;
3583 mutex_unlock(&set_limit_mutex
);
3588 mem_cgroup_reclaim(memcg
, GFP_KERNEL
,
3589 MEM_CGROUP_RECLAIM_NOSWAP
|
3590 MEM_CGROUP_RECLAIM_SHRINK
);
3591 curusage
= res_counter_read_u64(&memcg
->memsw
, RES_USAGE
);
3592 /* Usage is reduced ? */
3593 if (curusage
>= oldusage
)
3596 oldusage
= curusage
;
3598 if (!ret
&& enlarge
)
3599 memcg_oom_recover(memcg
);
3603 unsigned long mem_cgroup_soft_limit_reclaim(struct zone
*zone
, int order
,
3605 unsigned long *total_scanned
)
3607 unsigned long nr_reclaimed
= 0;
3608 struct mem_cgroup_per_zone
*mz
, *next_mz
= NULL
;
3609 unsigned long reclaimed
;
3611 struct mem_cgroup_tree_per_zone
*mctz
;
3612 unsigned long long excess
;
3613 unsigned long nr_scanned
;
3618 mctz
= soft_limit_tree_node_zone(zone_to_nid(zone
), zone_idx(zone
));
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
3628 mz
= mem_cgroup_largest_soft_limit_node(mctz
);
3633 reclaimed
= mem_cgroup_soft_reclaim(mz
->mem
, zone
,
3634 gfp_mask
, &nr_scanned
);
3635 nr_reclaimed
+= reclaimed
;
3636 *total_scanned
+= nr_scanned
;
3637 spin_lock(&mctz
->lock
);
3640 * If we failed to reclaim anything from this memory cgroup
3641 * it is time to move on to the next cgroup
				 * Loop until we find yet another one.
				 *
				 * By the time we get the soft_limit lock
				 * again, someone might have added the
				 * group back on the RB tree. Iterate to
				 * make sure we get a different mem.
				 * mem_cgroup_largest_soft_limit_node returns
				 * NULL if no other cgroup is present on
3658 __mem_cgroup_largest_soft_limit_node(mctz
);
3660 css_put(&next_mz
->mem
->css
);
3661 else /* next_mz == NULL or other memcg */
3665 __mem_cgroup_remove_exceeded(mz
->mem
, mz
, mctz
);
3666 excess
= res_counter_soft_limit_excess(&mz
->mem
->res
);
3668 * One school of thought says that we should not add
3669 * back the node to the tree if reclaim returns 0.
3670 * But our reclaim could return 0, simply because due
3671 * to priority we are exposing a smaller subset of
3672 * memory to reclaim from. Consider this as a longer
3675 /* If excess == 0, no tree ops */
3676 __mem_cgroup_insert_exceeded(mz
->mem
, mz
, mctz
, excess
);
3677 spin_unlock(&mctz
->lock
);
3678 css_put(&mz
->mem
->css
);
3681 * Could not reclaim anything and there are no more
3682 * mem cgroups to try or we seem to be looping without
3683 * reclaiming anything.
3685 if (!nr_reclaimed
&&
3687 loop
> MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS
))
3689 } while (!nr_reclaimed
);
3691 css_put(&next_mz
->mem
->css
);
3692 return nr_reclaimed
;
 * This routine traverses the page_cgroups in the given list and drops them
 * all. *And* this routine doesn't reclaim the pages themselves, it just
 * removes the page_cgroups.
3699 static int mem_cgroup_force_empty_list(struct mem_cgroup
*memcg
,
3700 int node
, int zid
, enum lru_list lru
)
3702 struct mem_cgroup_per_zone
*mz
;
3703 unsigned long flags
, loop
;
3704 struct list_head
*list
;
3709 zone
= &NODE_DATA(node
)->node_zones
[zid
];
3710 mz
= mem_cgroup_zoneinfo(memcg
, node
, zid
);
3711 list
= &mz
->lruvec
.lists
[lru
];
3713 loop
= MEM_CGROUP_ZSTAT(mz
, lru
);
3714 /* give some margin against EBUSY etc...*/
3718 struct page_cgroup
*pc
;
3722 spin_lock_irqsave(&zone
->lru_lock
, flags
);
3723 if (list_empty(list
)) {
3724 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
3727 page
= list_entry(list
->prev
, struct page
, lru
);
3729 list_move(&page
->lru
, list
);
3731 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
3734 spin_unlock_irqrestore(&zone
->lru_lock
, flags
);
3736 pc
= lookup_page_cgroup(page
);
3738 ret
= mem_cgroup_move_parent(page
, pc
, memcg
, GFP_KERNEL
);
3742 if (ret
== -EBUSY
|| ret
== -EINVAL
) {
3743 /* found lock contention or "pc" is obsolete. */
3750 if (!ret
&& !list_empty(list
))
 * Make the mem_cgroup's charge 0 if there is no task in it.
 * This enables deleting this mem_cgroup.
3759 static int mem_cgroup_force_empty(struct mem_cgroup
*memcg
, bool free_all
)
3762 int node
, zid
, shrink
;
3763 int nr_retries
= MEM_CGROUP_RECLAIM_RETRIES
;
3764 struct cgroup
*cgrp
= memcg
->css
.cgroup
;
3766 css_get(&memcg
->css
);
3769 /* should free all ? */
3775 if (cgroup_task_count(cgrp
) || !list_empty(&cgrp
->children
))
3778 if (signal_pending(current
))
3780 /* This is for making all *used* pages to be on LRU. */
3781 lru_add_drain_all();
3782 drain_all_stock_sync(memcg
);
3784 mem_cgroup_start_move(memcg
);
3785 for_each_node_state(node
, N_HIGH_MEMORY
) {
3786 for (zid
= 0; !ret
&& zid
< MAX_NR_ZONES
; zid
++) {
3789 ret
= mem_cgroup_force_empty_list(memcg
,
3798 mem_cgroup_end_move(memcg
);
3799 memcg_oom_recover(memcg
);
3800 /* it seems parent cgroup doesn't have enough mem */
3804 /* "ret" should also be checked to ensure all lists are empty. */
3805 } while (memcg
->res
.usage
> 0 || ret
);
3807 css_put(&memcg
->css
);
3811 /* returns EBUSY if there is a task or if we come here twice. */
3812 if (cgroup_task_count(cgrp
) || !list_empty(&cgrp
->children
) || shrink
) {
3816 /* we call try-to-free pages for make this cgroup empty */
3817 lru_add_drain_all();
3818 /* try to free all pages in this cgroup */
3820 while (nr_retries
&& memcg
->res
.usage
> 0) {
3823 if (signal_pending(current
)) {
3827 progress
= try_to_free_mem_cgroup_pages(memcg
, GFP_KERNEL
,
3831 /* maybe some writeback is necessary */
3832 congestion_wait(BLK_RW_ASYNC
, HZ
/10);
3837 /* try move_account...there may be some *locked* pages. */
3841 int mem_cgroup_force_empty_write(struct cgroup
*cont
, unsigned int event
)
3843 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont
), true);
static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}
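
/*
 * Interface note (summarizing the write handler below, illustrative): this
 * flag is exposed as the "memory.use_hierarchy" control file. Per the checks
 * below it can only be changed while the parent does not have use_hierarchy
 * set and the cgroup has no children yet, e.g. roughly:
 *
 *	echo 1 > <memory cgroup mount>/mygroup/memory.use_hierarchy
 */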
3852 static int mem_cgroup_hierarchy_write(struct cgroup
*cont
, struct cftype
*cft
,
3856 struct mem_cgroup
*memcg
= mem_cgroup_from_cont(cont
);
3857 struct cgroup
*parent
= cont
->parent
;
3858 struct mem_cgroup
*parent_memcg
= NULL
;
3861 parent_memcg
= mem_cgroup_from_cont(parent
);
3865 * If parent's use_hierarchy is set, we can't make any modifications
3866 * in the child subtrees. If it is unset, then the change can
3867 * occur, provided the current cgroup has no children.
3869 * For the root cgroup, parent_mem is NULL, we allow value to be
3870 * set if there are no children.
3872 if ((!parent_memcg
|| !parent_memcg
->use_hierarchy
) &&
3873 (val
== 1 || val
== 0)) {
3874 if (list_empty(&cont
->children
))
3875 memcg
->use_hierarchy
= val
;
3886 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup
*memcg
,
3887 enum mem_cgroup_stat_index idx
)
3889 struct mem_cgroup
*iter
;
3892 /* Per-cpu values can be negative, use a signed accumulator */
3893 for_each_mem_cgroup_tree(iter
, memcg
)
3894 val
+= mem_cgroup_read_stat(iter
, idx
);
3896 if (val
< 0) /* race ? */
3901 static inline u64
mem_cgroup_usage(struct mem_cgroup
*memcg
, bool swap
)
3905 if (!mem_cgroup_is_root(memcg
)) {
3907 return res_counter_read_u64(&memcg
->res
, RES_USAGE
);
3909 return res_counter_read_u64(&memcg
->memsw
, RES_USAGE
);
3912 val
= mem_cgroup_recursive_stat(memcg
, MEM_CGROUP_STAT_CACHE
);
3913 val
+= mem_cgroup_recursive_stat(memcg
, MEM_CGROUP_STAT_RSS
);
3916 val
+= mem_cgroup_recursive_stat(memcg
, MEM_CGROUP_STAT_SWAPOUT
);
3918 return val
<< PAGE_SHIFT
;
3921 static u64
mem_cgroup_read(struct cgroup
*cont
, struct cftype
*cft
)
3923 struct mem_cgroup
*memcg
= mem_cgroup_from_cont(cont
);
3927 type
= MEMFILE_TYPE(cft
->private);
3928 name
= MEMFILE_ATTR(cft
->private);
3931 if (name
== RES_USAGE
)
3932 val
= mem_cgroup_usage(memcg
, false);
3934 val
= res_counter_read_u64(&memcg
->res
, name
);
3937 if (name
== RES_USAGE
)
3938 val
= mem_cgroup_usage(memcg
, true);
3940 val
= res_counter_read_u64(&memcg
->memsw
, name
);
3949 * The user of this function is...
3952 static int mem_cgroup_write(struct cgroup
*cont
, struct cftype
*cft
,
3955 struct mem_cgroup
*memcg
= mem_cgroup_from_cont(cont
);
3957 unsigned long long val
;
3960 type
= MEMFILE_TYPE(cft
->private);
3961 name
= MEMFILE_ATTR(cft
->private);
3964 if (mem_cgroup_is_root(memcg
)) { /* Can't set limit on root */
3968 /* This function does all necessary parse...reuse it */
3969 ret
= res_counter_memparse_write_strategy(buffer
, &val
);
3973 ret
= mem_cgroup_resize_limit(memcg
, val
);
3975 ret
= mem_cgroup_resize_memsw_limit(memcg
, val
);
3977 case RES_SOFT_LIMIT
:
3978 ret
= res_counter_memparse_write_strategy(buffer
, &val
);
3982 * For memsw, soft limits are hard to implement in terms
3983 * of semantics, for now, we support soft limits for
3984 * control without swap
3987 ret
= res_counter_set_soft_limit(&memcg
->res
, val
);
3992 ret
= -EINVAL
; /* should be BUG() ? */
3998 static void memcg_get_hierarchical_limit(struct mem_cgroup
*memcg
,
3999 unsigned long long *mem_limit
, unsigned long long *memsw_limit
)
4001 struct cgroup
*cgroup
;
4002 unsigned long long min_limit
, min_memsw_limit
, tmp
;
4004 min_limit
= res_counter_read_u64(&memcg
->res
, RES_LIMIT
);
4005 min_memsw_limit
= res_counter_read_u64(&memcg
->memsw
, RES_LIMIT
);
4006 cgroup
= memcg
->css
.cgroup
;
4007 if (!memcg
->use_hierarchy
)
4010 while (cgroup
->parent
) {
4011 cgroup
= cgroup
->parent
;
4012 memcg
= mem_cgroup_from_cont(cgroup
);
4013 if (!memcg
->use_hierarchy
)
4015 tmp
= res_counter_read_u64(&memcg
->res
, RES_LIMIT
);
4016 min_limit
= min(min_limit
, tmp
);
4017 tmp
= res_counter_read_u64(&memcg
->memsw
, RES_LIMIT
);
4018 min_memsw_limit
= min(min_memsw_limit
, tmp
);
4021 *mem_limit
= min_limit
;
4022 *memsw_limit
= min_memsw_limit
;
4026 static int mem_cgroup_reset(struct cgroup
*cont
, unsigned int event
)
4028 struct mem_cgroup
*memcg
;
4031 memcg
= mem_cgroup_from_cont(cont
);
4032 type
= MEMFILE_TYPE(event
);
4033 name
= MEMFILE_ATTR(event
);
4037 res_counter_reset_max(&memcg
->res
);
4039 res_counter_reset_max(&memcg
->memsw
);
4043 res_counter_reset_failcnt(&memcg
->res
);
4045 res_counter_reset_failcnt(&memcg
->memsw
);
4052 static u64
mem_cgroup_move_charge_read(struct cgroup
*cgrp
,
4055 return mem_cgroup_from_cont(cgrp
)->move_charge_at_immigrate
;
4059 static int mem_cgroup_move_charge_write(struct cgroup
*cgrp
,
4060 struct cftype
*cft
, u64 val
)
4062 struct mem_cgroup
*memcg
= mem_cgroup_from_cont(cgrp
);
4064 if (val
>= (1 << NR_MOVE_TYPE
))
4067 * We check this value several times in both in can_attach() and
4068 * attach(), so we need cgroup lock to prevent this value from being
4072 memcg
->move_charge_at_immigrate
= val
;
4078 static int mem_cgroup_move_charge_write(struct cgroup
*cgrp
,
4079 struct cftype
*cft
, u64 val
)
4086 /* For read statistics */
4104 struct mcs_total_stat
{
4105 s64 stat
[NR_MCS_STAT
];
4111 } memcg_stat_strings
[NR_MCS_STAT
] = {
4112 {"cache", "total_cache"},
4113 {"rss", "total_rss"},
4114 {"mapped_file", "total_mapped_file"},
4115 {"pgpgin", "total_pgpgin"},
4116 {"pgpgout", "total_pgpgout"},
4117 {"swap", "total_swap"},
4118 {"pgfault", "total_pgfault"},
4119 {"pgmajfault", "total_pgmajfault"},
4120 {"inactive_anon", "total_inactive_anon"},
4121 {"active_anon", "total_active_anon"},
4122 {"inactive_file", "total_inactive_file"},
4123 {"active_file", "total_active_file"},
4124 {"unevictable", "total_unevictable"}
4129 mem_cgroup_get_local_stat(struct mem_cgroup
*memcg
, struct mcs_total_stat
*s
)
4134 val
= mem_cgroup_read_stat(memcg
, MEM_CGROUP_STAT_CACHE
);
4135 s
->stat
[MCS_CACHE
] += val
* PAGE_SIZE
;
4136 val
= mem_cgroup_read_stat(memcg
, MEM_CGROUP_STAT_RSS
);
4137 s
->stat
[MCS_RSS
] += val
* PAGE_SIZE
;
4138 val
= mem_cgroup_read_stat(memcg
, MEM_CGROUP_STAT_FILE_MAPPED
);
4139 s
->stat
[MCS_FILE_MAPPED
] += val
* PAGE_SIZE
;
4140 val
= mem_cgroup_read_events(memcg
, MEM_CGROUP_EVENTS_PGPGIN
);
4141 s
->stat
[MCS_PGPGIN
] += val
;
4142 val
= mem_cgroup_read_events(memcg
, MEM_CGROUP_EVENTS_PGPGOUT
);
4143 s
->stat
[MCS_PGPGOUT
] += val
;
4144 if (do_swap_account
) {
4145 val
= mem_cgroup_read_stat(memcg
, MEM_CGROUP_STAT_SWAPOUT
);
4146 s
->stat
[MCS_SWAP
] += val
* PAGE_SIZE
;
4148 val
= mem_cgroup_read_events(memcg
, MEM_CGROUP_EVENTS_PGFAULT
);
4149 s
->stat
[MCS_PGFAULT
] += val
;
4150 val
= mem_cgroup_read_events(memcg
, MEM_CGROUP_EVENTS_PGMAJFAULT
);
4151 s
->stat
[MCS_PGMAJFAULT
] += val
;
4154 val
= mem_cgroup_nr_lru_pages(memcg
, BIT(LRU_INACTIVE_ANON
));
4155 s
->stat
[MCS_INACTIVE_ANON
] += val
* PAGE_SIZE
;
4156 val
= mem_cgroup_nr_lru_pages(memcg
, BIT(LRU_ACTIVE_ANON
));
4157 s
->stat
[MCS_ACTIVE_ANON
] += val
* PAGE_SIZE
;
4158 val
= mem_cgroup_nr_lru_pages(memcg
, BIT(LRU_INACTIVE_FILE
));
4159 s
->stat
[MCS_INACTIVE_FILE
] += val
* PAGE_SIZE
;
4160 val
= mem_cgroup_nr_lru_pages(memcg
, BIT(LRU_ACTIVE_FILE
));
4161 s
->stat
[MCS_ACTIVE_FILE
] += val
* PAGE_SIZE
;
4162 val
= mem_cgroup_nr_lru_pages(memcg
, BIT(LRU_UNEVICTABLE
));
4163 s
->stat
[MCS_UNEVICTABLE
] += val
* PAGE_SIZE
;
4167 mem_cgroup_get_total_stat(struct mem_cgroup
*memcg
, struct mcs_total_stat
*s
)
4169 struct mem_cgroup
*iter
;
4171 for_each_mem_cgroup_tree(iter
, memcg
)
4172 mem_cgroup_get_local_stat(iter
, s
);
4176 static int mem_control_numa_stat_show(struct seq_file
*m
, void *arg
)
4179 unsigned long total_nr
, file_nr
, anon_nr
, unevictable_nr
;
4180 unsigned long node_nr
;
4181 struct cgroup
*cont
= m
->private;
4182 struct mem_cgroup
*mem_cont
= mem_cgroup_from_cont(cont
);
4184 total_nr
= mem_cgroup_nr_lru_pages(mem_cont
, LRU_ALL
);
4185 seq_printf(m
, "total=%lu", total_nr
);
4186 for_each_node_state(nid
, N_HIGH_MEMORY
) {
4187 node_nr
= mem_cgroup_node_nr_lru_pages(mem_cont
, nid
, LRU_ALL
);
4188 seq_printf(m
, " N%d=%lu", nid
, node_nr
);
4192 file_nr
= mem_cgroup_nr_lru_pages(mem_cont
, LRU_ALL_FILE
);
4193 seq_printf(m
, "file=%lu", file_nr
);
4194 for_each_node_state(nid
, N_HIGH_MEMORY
) {
4195 node_nr
= mem_cgroup_node_nr_lru_pages(mem_cont
, nid
,
4197 seq_printf(m
, " N%d=%lu", nid
, node_nr
);
4201 anon_nr
= mem_cgroup_nr_lru_pages(mem_cont
, LRU_ALL_ANON
);
4202 seq_printf(m
, "anon=%lu", anon_nr
);
4203 for_each_node_state(nid
, N_HIGH_MEMORY
) {
4204 node_nr
= mem_cgroup_node_nr_lru_pages(mem_cont
, nid
,
4206 seq_printf(m
, " N%d=%lu", nid
, node_nr
);
4210 unevictable_nr
= mem_cgroup_nr_lru_pages(mem_cont
, BIT(LRU_UNEVICTABLE
));
4211 seq_printf(m
, "unevictable=%lu", unevictable_nr
);
4212 for_each_node_state(nid
, N_HIGH_MEMORY
) {
4213 node_nr
= mem_cgroup_node_nr_lru_pages(mem_cont
, nid
,
4214 BIT(LRU_UNEVICTABLE
));
4215 seq_printf(m
, " N%d=%lu", nid
, node_nr
);
4220 #endif /* CONFIG_NUMA */
4222 static int mem_control_stat_show(struct cgroup
*cont
, struct cftype
*cft
,
4223 struct cgroup_map_cb
*cb
)
4225 struct mem_cgroup
*mem_cont
= mem_cgroup_from_cont(cont
);
4226 struct mcs_total_stat mystat
;
4229 memset(&mystat
, 0, sizeof(mystat
));
4230 mem_cgroup_get_local_stat(mem_cont
, &mystat
);
4233 for (i
= 0; i
< NR_MCS_STAT
; i
++) {
4234 if (i
== MCS_SWAP
&& !do_swap_account
)
4236 cb
->fill(cb
, memcg_stat_strings
[i
].local_name
, mystat
.stat
[i
]);
4239 /* Hierarchical information */
4241 unsigned long long limit
, memsw_limit
;
4242 memcg_get_hierarchical_limit(mem_cont
, &limit
, &memsw_limit
);
4243 cb
->fill(cb
, "hierarchical_memory_limit", limit
);
4244 if (do_swap_account
)
4245 cb
->fill(cb
, "hierarchical_memsw_limit", memsw_limit
);
4248 memset(&mystat
, 0, sizeof(mystat
));
4249 mem_cgroup_get_total_stat(mem_cont
, &mystat
);
4250 for (i
= 0; i
< NR_MCS_STAT
; i
++) {
4251 if (i
== MCS_SWAP
&& !do_swap_account
)
4253 cb
->fill(cb
, memcg_stat_strings
[i
].total_name
, mystat
.stat
[i
]);
4256 #ifdef CONFIG_DEBUG_VM
4259 struct mem_cgroup_per_zone
*mz
;
4260 unsigned long recent_rotated
[2] = {0, 0};
4261 unsigned long recent_scanned
[2] = {0, 0};
4263 for_each_online_node(nid
)
4264 for (zid
= 0; zid
< MAX_NR_ZONES
; zid
++) {
4265 mz
= mem_cgroup_zoneinfo(mem_cont
, nid
, zid
);
4267 recent_rotated
[0] +=
4268 mz
->reclaim_stat
.recent_rotated
[0];
4269 recent_rotated
[1] +=
4270 mz
->reclaim_stat
.recent_rotated
[1];
4271 recent_scanned
[0] +=
4272 mz
->reclaim_stat
.recent_scanned
[0];
4273 recent_scanned
[1] +=
4274 mz
->reclaim_stat
.recent_scanned
[1];
4276 cb
->fill(cb
, "recent_rotated_anon", recent_rotated
[0]);
4277 cb
->fill(cb
, "recent_rotated_file", recent_rotated
[1]);
4278 cb
->fill(cb
, "recent_scanned_anon", recent_scanned
[0]);
4279 cb
->fill(cb
, "recent_scanned_file", recent_scanned
[1]);
static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
				       u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	if (val > 100)
		return -EINVAL;

	if (cgrp->parent == NULL)
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();

	/* If under hierarchy, only empty-root can set this value */
	if ((parent->use_hierarchy) ||
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}

	memcg->swappiness = val;

	cgroup_unlock();

	return 0;
}
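
/*
 * Note (illustrative): "memory.swappiness" mirrors the global vm.swappiness
 * knob on a per-cgroup basis. Per the checks above it is only writable on a
 * cgroup that is not part of an active hierarchy, e.g. roughly:
 *
 *	echo 0 > <memory cgroup mount>/mygroup/memory.swappiness
 */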
4323 static void __mem_cgroup_threshold(struct mem_cgroup
*memcg
, bool swap
)
4325 struct mem_cgroup_threshold_ary
*t
;
4331 t
= rcu_dereference(memcg
->thresholds
.primary
);
4333 t
= rcu_dereference(memcg
->memsw_thresholds
.primary
);
4338 usage
= mem_cgroup_usage(memcg
, swap
);
4341 * current_threshold points to threshold just below usage.
4342 * If it's not true, a threshold was crossed after last
4343 * call of __mem_cgroup_threshold().
4345 i
= t
->current_threshold
;
4348 * Iterate backward over array of thresholds starting from
4349 * current_threshold and check if a threshold is crossed.
4350 * If none of thresholds below usage is crossed, we read
4351 * only one element of the array here.
4353 for (; i
>= 0 && unlikely(t
->entries
[i
].threshold
> usage
); i
--)
4354 eventfd_signal(t
->entries
[i
].eventfd
, 1);
4356 /* i = current_threshold + 1 */
4360 * Iterate forward over array of thresholds starting from
4361 * current_threshold+1 and check if a threshold is crossed.
4362 * If none of thresholds above usage is crossed, we read
4363 * only one element of the array here.
4365 for (; i
< t
->size
&& unlikely(t
->entries
[i
].threshold
<= usage
); i
++)
4366 eventfd_signal(t
->entries
[i
].eventfd
, 1);
4368 /* Update current_threshold */
4369 t
->current_threshold
= i
- 1;
4374 static void mem_cgroup_threshold(struct mem_cgroup
*memcg
)
4377 __mem_cgroup_threshold(memcg
, false);
4378 if (do_swap_account
)
4379 __mem_cgroup_threshold(memcg
, true);
4381 memcg
= parent_mem_cgroup(memcg
);
static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	return _a->threshold - _b->threshold;
}
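
/*
 * compare_thresholds() is the sort() callback used by
 * mem_cgroup_usage_register_event() below: the thresholds array is kept
 * sorted ascending by threshold so that __mem_cgroup_threshold() can walk
 * backward/forward from current_threshold and usually only needs to look at
 * one element on either side of the current usage.
 */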
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}
static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
	int i, size, ret;

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold < usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
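/*
 * For reference, userspace reaches the handler above through the cgroup
 * eventfd interface (Documentation/cgroups/memory.txt). A rough sketch,
 * with the mount point and the 50M threshold chosen only for illustration:
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *		       O_WRONLY);
 *	char buf[64];
 *	int len = snprintf(buf, sizeof(buf), "%d %d %llu",
 *			   efd, ufd, 50ULL << 20);
 *	write(cfd, buf, len);
 *
 * A read() on efd then completes whenever usage crosses the 50M threshold.
 */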
static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	/*
	 * Something went wrong if we're trying to unregister a threshold
	 * when we don't have any.
	 */
	BUG_ON(!thresholds);

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold < usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	mutex_unlock(&memcg->thresholds_lock);
}
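/*
 * Note on the primary/spare pair used by the two functions above:
 * registration allocates a fresh array and demotes the old primary to spare,
 * while unregistration rebuilds into the existing spare; either way the new
 * array is published with rcu_assign_pointer(), so __mem_cgroup_threshold()
 * can walk it under rcu_read_lock() without taking thresholds_lock.
 */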
static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (atomic_read(&memcg->under_oom))
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}
static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
	struct cftype *cft, struct cgroup_map_cb *cb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);

	if (atomic_read(&memcg->under_oom))
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!cgrp->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();
	/* oom-kill-disable is a flag for subhierarchy. */
	if ((parent->use_hierarchy) ||
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}
	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);
	cgroup_unlock();
	return 0;
}
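/*
 * Userspace view of the two handlers above: reading memory.oom_control
 * reports oom_kill_disable and under_oom, and writing "1" to it disables the
 * OOM killer for the group. As checked above, the write is refused for the
 * root cgroup and when use_hierarchy makes the flag a property of the
 * subhierarchy rather than of this single group.
 */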
#ifdef CONFIG_NUMA
static const struct file_operations mem_control_numa_stat_file_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
{
	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;

	file->f_op = &mem_control_numa_stat_file_operations;
	return single_open(file, mem_control_numa_stat_show, cont);
}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	/*
	 * Part of this would be better living in a separate allocation
	 * function, leaving us with just the cgroup tree population work.
	 * We, however, depend on state such as network's proto_list that
	 * is only initialized after cgroup creation. The least cumbersome
	 * way I found to deal with it is to defer it all to populate time.
	 */
	return mem_cgroup_sockets_init(cont, ss);
}

static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	mem_cgroup_sockets_destroy(cont, ss);
}
#else
static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}

static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
}
#endif
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.open = mem_control_numa_stat_open,
		.mode = S_IRUGO,
	},
#endif
};
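/*
 * Each entry above becomes a per-cgroup control file prefixed with the
 * subsystem name, e.g. memory.usage_in_bytes or memory.oom_control;
 * MEMFILE_PRIVATE() packs the counter type and member into cft->private so
 * that the shared read/write/trigger handlers can tell the files apart.
 */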
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	if (!do_swap_account)
		return 0;
	return cgroup_add_files(cont, ss, memsw_cgroup_files,
				ARRAY_SIZE(memsw_cgroup_files));
}
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
#endif
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
	}
	memcg->info.nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->info.nodeinfo[node]);
}
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;
	int size = sizeof(struct mem_cgroup);

	/* Can be very big if MAX_NUMNODES is very big */
	if (size < PAGE_SIZE)
		mem = kzalloc(size, GFP_KERNEL);
	else
		mem = vzalloc(size);

	if (!mem)
		return NULL;

	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!mem->stat)
		goto out_free;
	spin_lock_init(&mem->pcp_counter_lock);
	return mem;

out_free:
	if (size < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
	return NULL;
}
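/*
 * struct mem_cgroup embeds per-node state, so with a large MAX_NUMNODES it
 * can be bigger than a page; that is why the allocation above picks between
 * kzalloc() and vzalloc(), and why the free paths repeat the same size check.
 */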
/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of reference from swap_cgroup and free mem_cgroup when
 * it goes down to 0.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_remove_from_trees(memcg);
	free_css_id(&mem_cgroup_subsys, &memcg->css);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(memcg, node);

	free_percpu(memcg->stat);
	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
}

static void mem_cgroup_get(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg->refcnt);
}

static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
{
	if (atomic_sub_and_test(count, &memcg->refcnt)) {
		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
		__mem_cgroup_free(memcg);
		if (parent)
			mem_cgroup_put(parent);
	}
}

static void mem_cgroup_put(struct mem_cgroup *memcg)
{
	__mem_cgroup_put(memcg, 1);
}
/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
EXPORT_SYMBOL(parent_mem_cgroup);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account)
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif
static int mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

	for_each_node_state(node, N_POSSIBLE) {
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		if (!rtpn)
			goto err_cleanup;

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
	return 0;

err_cleanup:
	for_each_node_state(node, N_POSSIBLE) {
		if (!soft_limit_tree.rb_tree_per_node[node])
			break;
		kfree(soft_limit_tree.rb_tree_per_node[node]);
		soft_limit_tree.rb_tree_per_node[node] = NULL;
	}
	return 1;
}
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *memcg, *parent;
	long error = -ENOMEM;
	int node;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
			goto free_out;

	/* root ? */
	if (cont->parent == NULL) {
		int cpu;
		enable_swap_cgroup();
		parent = NULL;
		if (mem_cgroup_soft_limit_tree_init())
			goto free_out;
		root_mem_cgroup = memcg;
		for_each_possible_cpu(cpu) {
			struct memcg_stock_pcp *stock =
						&per_cpu(memcg_stock, cpu);
			INIT_WORK(&stock->work, drain_local_stock);
		}
		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		memcg->use_hierarchy = parent->use_hierarchy;
		memcg->oom_kill_disable = parent->oom_kill_disable;
	}

	if (parent && parent->use_hierarchy) {
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
		/*
		 * We increment refcnt of the parent to ensure that we can
		 * safely access it on res_counter_charge/uncharge.
		 * This refcnt will be decremented when freeing this
		 * mem_cgroup(see mem_cgroup_put).
		 */
		mem_cgroup_get(parent);
	} else {
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
	}
	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);

	if (parent)
		memcg->swappiness = mem_cgroup_swappiness(parent);
	atomic_set(&memcg->refcnt, 1);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
	return &memcg->css;
free_out:
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}
static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

	return mem_cgroup_force_empty(memcg, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

	kmem_cgroup_destroy(ss, cont);

	mem_cgroup_put(memcg);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);

	if (!ret)
		ret = register_kmem_files(cont, ss);

	return ret;
}
#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *memcg = mc.to;

	if (mem_cgroup_is_root(memcg)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "memcg" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&memcg->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
		if (ret)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return ret;
		mc.precharge++;
	}
	return ret;
}
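/*
 * Rough shape of the precharge scheme (names as used above): can_attach()
 * counts the candidate pages in the task's mm and calls this once with that
 * count, so the common case is a single bulk res_counter_charge() of
 * count * PAGE_SIZE; only if the bulk charge fails do we fall back to
 * charging page by page, rescheduling every PRECHARGE_COUNT_AT_ONCE rounds.
 */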
/**
 * is_target_pte_for_mc - check a pte whether it is valid for move charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with extra refcnt got(Callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE,	/* not used */
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon() || page_mapcount(page) > 2)
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	int usage_count;
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	usage_count = mem_cgroup_count_swap_user(ent, &page);
	if (usage_count > 1) { /* we don't move shared anon */
		if (page)
			put_page(page);
		return NULL;
	}
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct inode *inode;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	inode = vma->vm_file->f_path.dentry->d_inode;
	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task(page-faulted). */
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(&swapper_space, swap.val);
	}
#endif
	return page;
}
static int is_target_pte_for_mc(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	int ret = 0;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return 0;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only loose check w/o page_cgroup lock.
		 * mem_cgroup_move_account() checks the pc is valid or not under
		 * the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}
static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}
/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);

	if (memcg->move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move the owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		union mc_target target;
		int type;
		struct page *page;
		struct page_cgroup *pc;
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		type = is_target_pte_for_mc(vma, addr, ptent, &target);
		switch (type) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to, false)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* is_target_pte_for_mc() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent,
						mc.from, mc.to, false)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem might be waiting on the
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * means we have consumed all precharges and failed in
			 * doing additional charge. Just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
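/*
 * None of the move-charge machinery above runs unless the destination group
 * opted in beforehand, e.g. by writing 1 (anonymous pages) or 3 (anonymous
 * plus file pages) to memory.move_charge_at_immigrate; with the default of 0,
 * can_attach() skips the precharge entirely.
 */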
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup_taskset *tset)
{
}
#endif
struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
	.use_id = 1,
};
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static int __init enable_swap_account(char *s)
{
	/* consider enabled if no parameter or 1 is given */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account
);