/* mm/memcontrol.c — blame snapshot at commit "memcg: add memory.vmscan_stat" */
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,		/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events, which is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET	(1024)

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

enum {
	SCAN_BY_LIMIT,
	SCAN_BY_SYSTEM,
	NR_SCAN_CONTEXT,
	SCAN_BY_SHRINK,	/* not recorded now */
};

enum {
	SCAN,
	SCAN_ANON,
	SCAN_FILE,
	ROTATE,
	ROTATE_ANON,
	ROTATE_FILE,
	FREED,
	FREED_ANON,
	FREED_FILE,
	ELAPSED,
	NR_SCANSTATS,
};

struct scanstat {
	spinlock_t	lock;
	unsigned long	stats[NR_SCAN_CONTEXT][NR_SCANSTATS];
	unsigned long	rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS];
};

const char *scanstat_string[NR_SCANSTATS] = {
	"scanned_pages",
	"scanned_anon_pages",
	"scanned_file_pages",
	"rotated_pages",
	"rotated_anon_pages",
	"rotated_file_pages",
	"freed_pages",
	"freed_anon_pages",
	"freed_file_pages",
	"elapsed_ns",
};
#define SCANSTAT_WORD_LIMIT	"_by_limit"
#define SCANSTAT_WORD_SYSTEM	"_by_system"
#define SCANSTAT_WORD_HIERARCHY	"_under_hierarchy"

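/*
 * Editor's note (not in the original source): from the strings and suffix
 * defines above, the entries visible in memory.vmscan_stat are presumably
 * built by concatenation, e.g. "scanned_pages_by_limit" for limit-triggered
 * reclaim, "freed_anon_pages_by_system" for global reclaim, and the
 * "_under_hierarchy" variants for the rootstats[] totals.
 */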

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from.
	 */
	int last_scanned_child;
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;

	atomic_t	refcnt;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;
	/* For recording LRU-scan statistics */
	struct scanstat scanstat;
	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
};

/* Stuffs for move charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

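/*
 * Worked example (editor's illustration, not in the original source): for
 * the mem+swap limit file, cft->private is MEMFILE_PRIVATE(_MEMSWAP,
 * RES_LIMIT) == (1 << 16) | RES_LIMIT. MEMFILE_TYPE() then recovers
 * _MEMSWAP from the upper 16 bits and MEMFILE_ATTR() recovers RES_LIMIT
 * from the lower 16 bits.
 */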
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
#define MEM_CGROUP_RECLAIM_SOFT	(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(struct mem_cgroup *mem);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; mem; mem = parent_mem_cgroup(mem)) {
		mz = mem_cgroup_zoneinfo(mem, nid, zid);
		excess = res_counter_soft_limit_excess(&mem->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mem, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

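/*
 * Editor's note (illustrative, not in the original source): because the loop
 * above walks parent_mem_cgroup() up to the root, charging one page in a
 * grandchild re-sorts the per-zone node of the grandchild, its parent and
 * the root, each keyed by that group's own current soft-limit excess; an
 * ancestor whose excess dropped to 0 is simply left off the tree.
 */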
static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(mem, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(mem, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value, so we may have a chance to
 * implement a periodic synchronization of counters in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because he accounts memory. Even if we provided quick-and-fuzzy
 * reads, we would always have to visit all online cpus and make the sum.
 * So, for now, unnecessary synchronization is not implemented. (it is just
 * implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *mem,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
}

void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.events[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
	else
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

	preempt_enable();
}

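/*
 * Worked example (editor's illustration, not in the original source):
 * charging a 2MB transparent hugepage (with 4KB base pages) calls this with
 * nr_pages == 512, which adds 512 to the RSS counter but records a single
 * PGPGIN event; uncharging passes -512 and records one PGPGOUT, while
 * EVENTS_COUNT still advances by 512 in both directions.
 */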
unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
			     unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(mem, nid, zid);

	for_each_lru(l) {
		if (BIT(l) & lru_mask)
			ret += MEM_CGROUP_ZSTAT(mz, l);
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_HIGH_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask);
	return total;
}

static bool __memcg_event_check(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = this_cpu_read(mem->stat->targets[target]);
	/* from time_after() in jiffies.h */
	return ((long)next - (long)val < 0);
}

static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);

	switch (target) {
	case MEM_CGROUP_TARGET_THRESH:
		next = val + THRESHOLDS_EVENTS_TARGET;
		break;
	case MEM_CGROUP_TARGET_SOFTLIMIT:
		next = val + SOFTLIMIT_EVENTS_TARGET;
		break;
	case MEM_CGROUP_TARGET_NUMAINFO:
		next = val + NUMAINFO_EVENTS_TARGET;
		break;
	default:
		return;
	}

	this_cpu_write(mem->stat->targets[target], next);
}

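/*
 * Worked example (editor's illustration, not in the original source): if
 * EVENTS_COUNT currently reads 1000 and targets[MEM_CGROUP_TARGET_THRESH]
 * was armed at 900, the time_after()-style signed compare in
 * __memcg_event_check() fires, and the target is re-armed at
 * 1000 + THRESHOLDS_EVENTS_TARGET == 1128. Counter wraparound is harmless
 * because only the signed difference (long)next - (long)val is tested.
 */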
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
		mem_cgroup_threshold(mem);
		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
		if (unlikely(__memcg_event_check(mem,
			     MEM_CGROUP_TARGET_SOFTLIMIT))) {
			mem_cgroup_update_tree(mem, page);
			__mem_cgroup_target_update(mem,
						   MEM_CGROUP_TARGET_SOFTLIMIT);
		}
#if MAX_NUMNODES > 1
		if (unlikely(__memcg_event_check(mem,
			MEM_CGROUP_TARGET_NUMAINFO))) {
			atomic_inc(&mem->numainfo_events);
			__mem_cgroup_target_update(mem,
				MEM_CGROUP_TARGET_NUMAINFO);
		}
#endif
	}
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner may be moved to another
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem))
			break;
	} while (!css_tryget(&mem->css));
	rcu_read_unlock();
	return mem;
}

/* The caller has to guarantee "mem" exists before calling this */
static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
	struct cgroup_subsys_state *css;
	int found;

	if (!mem) /* ROOT cgroup has the smallest ID */
		return root_mem_cgroup; /*css_put/get against root is ignored*/
	if (!mem->use_hierarchy) {
		if (css_tryget(&mem->css))
			return mem;
		return NULL;
	}
	rcu_read_lock();
	/*
	 * searching a memory cgroup which has the smallest ID under given
	 * ROOT cgroup. (ID >= 1)
	 */
	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
	if (css && css_tryget(css))
		mem = container_of(css, struct mem_cgroup, css);
	else
		mem = NULL;
	rcu_read_unlock();
	return mem;
}

static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
					struct mem_cgroup *root,
					bool cond)
{
	int nextid = css_id(&iter->css) + 1;
	int found;
	int hierarchy_used;
	struct cgroup_subsys_state *css;

	hierarchy_used = iter->use_hierarchy;

	css_put(&iter->css);
	/* If no ROOT, walk all, ignore hierarchy */
	if (!cond || (root && !hierarchy_used))
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	do {
		iter = NULL;
		rcu_read_lock();

		css = css_get_next(&mem_cgroup_subsys, nextid,
				&root->css, &found);
		if (css && css_tryget(css))
			iter = container_of(css, struct mem_cgroup, css);
		rcu_read_unlock();
		/* If css is NULL, no more cgroups will be found */
		nextid = found + 1;
	} while (css && !iter);

	return iter;
}
/*
 * for_each_mem_cgroup_tree() for visiting all cgroups under a tree. Be
 * careful: a "break" out of the loop is not allowed, because we hold a
 * reference count. Instead, set "cond" to false and "continue" to exit
 * the loop.
 */
#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
	for (iter = mem_cgroup_start_loop(root);\
	     iter != NULL;\
	     iter = mem_cgroup_get_next(iter, root, cond))

#define for_each_mem_cgroup_tree(iter, root) \
	for_each_mem_cgroup_tree_cond(iter, root, true)

#define for_each_mem_cgroup_all(iter) \
	for_each_mem_cgroup_tree_cond(iter, NULL, true)

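/*
 * Usage sketch (editor's illustration, not in the original source):
 * mem_cgroup_count_children() further down is the minimal pattern -
 *
 *	struct mem_cgroup *iter;
 *	int num = 0;
 *	for_each_mem_cgroup_tree(iter, mem)
 *		num++;
 *
 * To bail out early, flip a local bool "cond" to false and "continue" with
 * the _cond variant, so the iterator can still drop its css reference.
 */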
static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
	return (mem == root_mem_cgroup);
}

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *mem;

	if (!mm)
		return;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem))
		goto out;

	switch (idx) {
	case PGMAJFAULT:
		mem_cgroup_pgmajfault(mem, 1);
		break;
	case PGFAULT:
		mem_cgroup_pgfault(mem, 1);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);

/*
 * Following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routines of the global LRU independently from
 * memcg. What we have to take care of here is the validness of
 * pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to LRU before charge.
 * If the PCG_USED bit is not set, page_cgroup is not added to this private
 * LRU. When moving account, the page is not on LRU. It's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (!TestClearPageCgroupAcctLRU(pc))
		return;
	VM_BUG_ON(!pc->mem_cgroup);
	/*
	 * We don't check the PCG_USED bit. It's cleared when the "page" is
	 * finally removed from the global LRU.
	 */
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	VM_BUG_ON(list_empty(&pc->lru));
	list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim. If it still appears to be reclaimable, move it to the
 * tail of the inactive list.
 */
void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;
	enum lru_list lru = page_lru(page);

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move_tail(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
 * while it's linked to the lru, because the page may be reused after it's
 * fully uncharged. To handle that, unlink page_cgroup from the LRU when we
 * charge it again. This is done under lock_page, and it is expected that
 * zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * Doing this check without taking ->lru_lock seems wrong but this
	 * is safe. Because if page_cgroup's USED bit is unset, the page
	 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
	 * set, the commit after this will fail, anyway.
	 * This all charge/uncharge is done under some mutual exclusion.
	 * So, we don't need to take care of changes in the USED bit.
	 */
	if (likely(!PageLRU(page)))
		return;

	spin_lock_irqsave(&zone->lru_lock, flags);
	/*
	 * Forget the old LRU when this page_cgroup is *not* used. This Used
	 * bit is guarded by lock_page() because the page is SwapCache.
	 */
	if (!PageCgroupUsed(pc))
		mem_cgroup_del_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/* taking care of that the page is added to LRU while we commit it */
	if (likely(!PageLRU(page)))
		return;
	spin_lock_irqsave(&zone->lru_lock, flags);
	/* link when the page is linked to LRU but page_cgroup isn't */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
		mem_cgroup_add_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}


void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "mem", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true
	 * if hierarchy is enabled in "curr" and "curr" is a child of "mem"
	 * in the *cgroup* hierarchy (even if use_hierarchy is disabled
	 * in "mem").
	 */
	if (mem->use_hierarchy)
		ret = css_is_ancestor(&curr->css, &mem->css);
	else
		ret = (curr == mem);
	css_put(&curr->css);
	return ret;
}

static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long gb;
	unsigned long inactive_ratio;

	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	if (present_pages) {
		present_pages[0] = inactive;
		present_pages[1] = active;
	}

	return inactive_ratio;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long present_pages[2];
	unsigned long inactive_ratio;

	inactive_ratio = calc_inactive_ratio(memcg, present_pages);

	inactive = present_pages[0];
	active = present_pages[1];

	if (inactive * inactive_ratio < active)
		return 1;

	return 0;
}

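/*
 * Worked example (editor's illustration, not in the original source): for a
 * memcg holding 3GB of anon pages in total, gb == 3 and inactive_ratio ==
 * int_sqrt(30) == 5, so mem_cgroup_inactive_anon_is_low() reports "low"
 * whenever inactive * 5 < active; below 1GB the ratio falls back to 1 and
 * the check degenerates to inactive < active.
 */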
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;

	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));

	return (active > inactive);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	if (!PageCgroupUsed(pc))
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = zone_to_nid(z);
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * file + active;
	int ret;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		if (unlikely(!PageCgroupUsed(pc)))
			continue;

		page = lookup_cgroup_page(pc);

		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		ret = __isolate_lru_page(page, mode, file);
		switch (ret) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken += hpage_nr_pages(page);
			break;
		case -EBUSY:
			/* we don't affect global LRU but rotate in our LRU */
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			break;
		default:
			break;
		}
	}

	*scanned = scan;

	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
				      0, 0, 0, mode);

	return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @mem: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
{
	unsigned long long margin;

	margin = res_counter_margin(&mem->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&mem->memsw));
	return margin >> PAGE_SHIFT;
}

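/*
 * Worked example (editor's illustration, not in the original source): with
 * do_swap_account enabled, usage at 100MB against a 128MB memory limit, and
 * mem+swap usage at 110MB against a 120MB memsw limit, the two margins are
 * 28MB and 10MB; the function returns min(28MB, 10MB) >> PAGE_SHIFT, i.e.
 * 2560 pages with 4KB pages.
 */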
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *mem)
{
	int cpu;

	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *mem)
{
	int cpu;

	if (!mem)
		return;
	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();
}
/*
 * 2 routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding a race in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
 *			  under the hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *mem)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
	if (from == mem || to == mem
	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
	    || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
		ret = true;
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(mem)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for the memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

	if (!memcg || !p)
		return;


	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

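/*
 * Editor's note: pieced together from the format strings above, the output
 * shape is roughly the following (cgroup paths and values are hypothetical):
 *
 *	Task in /A/B killed as a result of limit of /A
 *	memory: usage 524288kB, limit 524288kB, failcnt 42
 *	memory+swap: usage 0kB, limit 9007199254740991kB, failcnt 0
 */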
/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;
	u64 memsw;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}

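/*
 * Worked example (editor's illustration, assuming an unlimited memsw reads
 * back as a very large value): with a 1GB memory limit and 2GB worth of
 * total_swap_pages, the function returns min(1GB + 2GB, huge) == 3GB; a
 * tighter 2GB memsw limit would instead cap the returned value at 2GB.
 */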
/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
	struct mem_cgroup *ret = NULL;
	struct cgroup_subsys_state *css;
	int nextid, found;

	if (!root_mem->use_hierarchy) {
		css_get(&root_mem->css);
		ret = root_mem;
	}

	while (!ret) {
		rcu_read_lock();
		nextid = root_mem->last_scanned_child + 1;
		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
				   &found);
		if (css && css_tryget(css))
			ret = container_of(css, struct mem_cgroup, css);

		rcu_read_unlock();
		/* Updates scanning parameter */
		if (!css) {
			/* this means start scan from ID:1 */
			root_mem->last_scanned_child = 0;
		} else
			root_mem->last_scanned_child = found;
	}

	return ret;
}

1533
4d0c066d
KH
1534/**
1535 * test_mem_cgroup_node_reclaimable
1536 * @mem: the target memcg
1537 * @nid: the node ID to be checked.
1538 * @noswap : specify true here if the user wants flle only information.
1539 *
1540 * This function returns whether the specified memcg contains any
1541 * reclaimable pages on a node. Returns true if there are any reclaimable
1542 * pages in the node.
1543 */
1544static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
1545 int nid, bool noswap)
1546{
bb2a0de9 1547 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE))
4d0c066d
KH
1548 return true;
1549 if (noswap || !total_swap_pages)
1550 return false;
bb2a0de9 1551 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON))
4d0c066d
KH
1552 return true;
1553 return false;
1554
1555}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&mem->numainfo_events))
		return;
	if (atomic_inc_return(&mem->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	mem->scan_nodes = node_states[N_HIGH_MEMORY];

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
			node_clear(nid, mem->scan_nodes);
	}

	atomic_set(&mem->numainfo_events, 0);
	atomic_set(&mem->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is O.K. Considering
 * memory reclaim from the current node, there are pros. and cons.
 *
 * Freeing memory from the current node means freeing memory from a node
 * which we'll use or we've used. So, it may make the LRU bad. And if several
 * threads hit limits, it will see contention on a node. But freeing from a
 * remote node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
	int node;

	mem_cgroup_may_update_nodemask(mem);
	node = mem->last_scanned_node;

	node = next_node(node, mem->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(mem->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to
	 * the LRU. No LRU may hold pages because all pages are UNEVICTABLE
	 * or the memcg is too small and all pages are not on the LRU. In
	 * that case, we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	mem->last_scanned_node = node;
	return node;
}

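/*
 * Worked example (editor's illustration, not in the original source): with
 * scan_nodes == {0,2} and last_scanned_node == 0, next_node() yields 2; on
 * the following call next_node(2, ...) runs past MAX_NUMNODES and wraps to
 * first_node() == 0, giving plain round-robin over the reclaimable nodes.
 */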
/*
 * Check all nodes whether they contain reclaimable pages or not.
 * For a quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to do a double check.
 */
bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
{
	int nid;

	/*
	 * quick check...making use of scan_node.
	 * We can skip unused nodes.
	 */
	if (!nodes_empty(mem->scan_nodes)) {
		for (nid = first_node(mem->scan_nodes);
		     nid < MAX_NUMNODES;
		     nid = next_node(nid, mem->scan_nodes)) {

			if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
				return true;
		}
	}
	/*
	 * Check rest of nodes.
	 */
	for_each_node_state(nid, N_HIGH_MEMORY) {
		if (node_isset(nid, mem->scan_nodes))
			continue;
		if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
			return true;
	}
	return false;
}

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
	return 0;
}

bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
{
	return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
}
#endif

static void __mem_cgroup_record_scanstat(unsigned long *stats,
			   struct memcg_scanrecord *rec)
{

	stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1];
	stats[SCAN_ANON] += rec->nr_scanned[0];
	stats[SCAN_FILE] += rec->nr_scanned[1];

	stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1];
	stats[ROTATE_ANON] += rec->nr_rotated[0];
	stats[ROTATE_FILE] += rec->nr_rotated[1];

	stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1];
	stats[FREED_ANON] += rec->nr_freed[0];
	stats[FREED_FILE] += rec->nr_freed[1];

	stats[ELAPSED] += rec->elapsed;
}

static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec)
{
	struct mem_cgroup *mem;
	int context = rec->context;

	if (context >= NR_SCAN_CONTEXT)
		return;

	mem = rec->mem;
	spin_lock(&mem->scanstat.lock);
	__mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec);
	spin_unlock(&mem->scanstat.lock);

	mem = rec->root;
	spin_lock(&mem->scanstat.lock);
	__mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec);
	spin_unlock(&mem->scanstat.lock);
}

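/*
 * Editor's note (illustrative, not in the original source): every reclaim
 * record is accounted twice - once into the victim's own
 * scanstat.stats[context] and once into the hierarchy root's
 * scanstat.rootstats[context] - which is presumably what the
 * "*_under_hierarchy" entries of memory.vmscan_stat report.
 */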
04046e1a
KH
1709/*
1710 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1711 * we reclaimed from, so that we don't end up penalizing one child extensively
1712 * based on its position in the children list.
6d61ef40
BS
1713 *
1714 * root_mem is the original ancestor that we've been reclaim from.
04046e1a
KH
1715 *
1716 * We give up and return to the caller when we visit root_mem twice.
1717 * (other groups can be removed while we're walking....)
81d39c20
KH
1718 *
1719 * If shrink==true, for avoiding to free too much, this returns immedieately.
6d61ef40
BS
1720 */
1721static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
4e416953 1722 struct zone *zone,
75822b44 1723 gfp_t gfp_mask,
0ae5e89c
YH
1724 unsigned long reclaim_options,
1725 unsigned long *total_scanned)
6d61ef40 1726{
04046e1a
KH
1727 struct mem_cgroup *victim;
1728 int ret, total = 0;
1729 int loop = 0;
75822b44
BS
1730 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1731 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
4e416953 1732 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
82f9d486 1733 struct memcg_scanrecord rec;
9d11ea9f 1734 unsigned long excess;
82f9d486 1735 unsigned long scanned;
9d11ea9f
JW
1736
1737 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
04046e1a 1738
22a668d7 1739 /* If memsw_is_minimum==1, swap-out is of no use. */
108b6a78 1740 if (!check_soft && !shrink && root_mem->memsw_is_minimum)
22a668d7
KH
1741 noswap = true;
1742
82f9d486
KH
1743 if (shrink)
1744 rec.context = SCAN_BY_SHRINK;
1745 else if (check_soft)
1746 rec.context = SCAN_BY_SYSTEM;
1747 else
1748 rec.context = SCAN_BY_LIMIT;
1749
1750 rec.root = root_mem;
1751
4e416953 1752 while (1) {
04046e1a 1753 victim = mem_cgroup_select_victim(root_mem);
4e416953 1754 if (victim == root_mem) {
04046e1a 1755 loop++;
fbc29a25
KH
1756 /*
1757 * We are not draining per cpu cached charges during
1758 * soft limit reclaim because global reclaim doesn't
1759 * care about charges. It tries to free some memory and
1760 * drained charges would not free any pages.
1761 */
1762 if (!check_soft && loop >= 1)
26fe6168 1763 drain_all_stock_async(root_mem);
4e416953
BS
1764 if (loop >= 2) {
1765 /*
1766 * If we have not been able to reclaim
1767 * anything, it might be because there are
1768 * no reclaimable pages under this hierarchy.
1769 */
1770 if (!check_soft || !total) {
1771 css_put(&victim->css);
1772 break;
1773 }
1774 /*
25985edc 1775 * We want to do more targeted reclaim.
4e416953
BS
1776 * excess >> 2 is not so excessive that we
1777 * reclaim too much, nor so small that we keep
1778 * coming back to reclaim from this cgroup.
1779 */
1780 if (total >= (excess >> 2) ||
1781 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1782 css_put(&victim->css);
1783 break;
1784 }
1785 }
1786 }
4d0c066d 1787 if (!mem_cgroup_reclaimable(victim, noswap)) {
04046e1a
KH
1788 /* this cgroup's local usage == 0 */
1789 css_put(&victim->css);
6d61ef40
BS
1790 continue;
1791 }
82f9d486
KH
1792 rec.mem = victim;
1793 rec.nr_scanned[0] = 0;
1794 rec.nr_scanned[1] = 0;
1795 rec.nr_rotated[0] = 0;
1796 rec.nr_rotated[1] = 0;
1797 rec.nr_freed[0] = 0;
1798 rec.nr_freed[1] = 0;
1799 rec.elapsed = 0;
04046e1a 1800 /* we use swappiness of local cgroup */
0ae5e89c 1801 if (check_soft) {
4e416953 1802 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
82f9d486
KH
1803 noswap, zone, &rec, &scanned);
1804 *total_scanned += scanned;
0ae5e89c 1805 } else
4e416953 1806 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
82f9d486
KH
1807 noswap, &rec);
1808 mem_cgroup_record_scanstat(&rec);
04046e1a 1809 css_put(&victim->css);
81d39c20
KH
1810 /*
1811 * When shrinking usage, we can't check whether we should stop here
1812 * or reclaim more; that depends on the caller. last_scanned_child
1813 * is enough to keep fairness under the tree.
1814 */
1815 if (shrink)
1816 return ret;
04046e1a 1817 total += ret;
4e416953 1818 if (check_soft) {
9d11ea9f 1819 if (!res_counter_soft_limit_excess(&root_mem->res))
4e416953 1820 return total;
9d11ea9f 1821 } else if (mem_cgroup_margin(root_mem))
4fd14ebf 1822 return total;
6d61ef40 1823 }
04046e1a 1824 return total;
6d61ef40
BS
1825}
1826
867578cb
KH
1827/*
1828 * Check whether the OOM killer is already running under our hierarchy.
1829 * If someone is running it, return false.
1af8efe9 1830 * Has to be called with memcg_oom_lock held.
867578cb
KH
1831 */
1832static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1833{
79dfdacc
MH
1834 int lock_count = -1;
1835 struct mem_cgroup *iter, *failed = NULL;
1836 bool cond = true;
a636b327 1837
79dfdacc
MH
1838 for_each_mem_cgroup_tree_cond(iter, mem, cond) {
1839 bool locked = iter->oom_lock;
1840
1841 iter->oom_lock = true;
1842 if (lock_count == -1)
1843 lock_count = iter->oom_lock;
1844 else if (lock_count != locked) {
1845 /*
1846 * this subtree of our hierarchy is already locked
1847 * so we cannot take the lock.
1848 */
1849 lock_count = 0;
1850 failed = iter;
1851 cond = false;
1852 }
7d74b06f 1853 }
867578cb 1854
79dfdacc
MH
1855 if (!failed)
1856 goto done;
1857
1858 /*
1859 * OK, we failed to lock the whole subtree so we have to clean up
1860 * what we set, up to the failing subtree.
1861 */
1862 cond = true;
1863 for_each_mem_cgroup_tree_cond(iter, mem, cond) {
1864 if (iter == failed) {
1865 cond = false;
1866 continue;
1867 }
1868 iter->oom_lock = false;
1869 }
1870done:
1871 return lock_count;
a636b327 1872}
0b7f569e 1873
79dfdacc 1874/*
1af8efe9 1875 * Has to be called with memcg_oom_lock held.
79dfdacc 1876 */
7d74b06f 1877static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
0b7f569e 1878{
7d74b06f
KH
1879 struct mem_cgroup *iter;
1880
79dfdacc
MH
1881 for_each_mem_cgroup_tree(iter, mem)
1882 iter->oom_lock = false;
1883 return 0;
1884}
1885
1886static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem)
1887{
1888 struct mem_cgroup *iter;
1889
1890 for_each_mem_cgroup_tree(iter, mem)
1891 atomic_inc(&iter->under_oom);
1892}
1893
1894static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
1895{
1896 struct mem_cgroup *iter;
1897
867578cb
KH
1898 /*
1899 * When a new child is created while the hierarchy is under oom,
1900 * mem_cgroup_oom_lock() may not be called. We have to use
1901 * atomic_add_unless() here.
1902 */
7d74b06f 1903 for_each_mem_cgroup_tree(iter, mem)
79dfdacc 1904 atomic_add_unless(&iter->under_oom, -1, 0);
0b7f569e
KH
1905}
1906
1af8efe9 1907static DEFINE_SPINLOCK(memcg_oom_lock);
867578cb
KH
1908static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1909
dc98df5a
KH
1910struct oom_wait_info {
1911 struct mem_cgroup *mem;
1912 wait_queue_t wait;
1913};
1914
1915static int memcg_oom_wake_function(wait_queue_t *wait,
1916 unsigned mode, int sync, void *arg)
1917{
1918 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1919 struct oom_wait_info *oom_wait_info;
1920
1921 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1922
1923 if (oom_wait_info->mem == wake_mem)
1924 goto wakeup;
1925 /* if no hierarchy, no match */
1926 if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1927 return 0;
1928 /*
1929 * Both oom_wait_info->mem and wake_mem are stable under us,
1930 * so we can use css_is_ancestor() without worrying about RCU.
1931 */
1932 if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1933 !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1934 return 0;
1935
1936wakeup:
1937 return autoremove_wake_function(wait, mode, sync, arg);
1938}
1939
1940static void memcg_wakeup_oom(struct mem_cgroup *mem)
1941{
1942 /* for filtering, pass "mem" as argument. */
1943 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1944}
1945
3c11ecf4
KH
1946static void memcg_oom_recover(struct mem_cgroup *mem)
1947{
79dfdacc 1948 if (mem && atomic_read(&mem->under_oom))
3c11ecf4
KH
1949 memcg_wakeup_oom(mem);
1950}
1951
867578cb
KH
1952/*
1953 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1954 */
1955bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
0b7f569e 1956{
dc98df5a 1957 struct oom_wait_info owait;
3c11ecf4 1958 bool locked, need_to_kill;
867578cb 1959
dc98df5a
KH
1960 owait.mem = mem;
1961 owait.wait.flags = 0;
1962 owait.wait.func = memcg_oom_wake_function;
1963 owait.wait.private = current;
1964 INIT_LIST_HEAD(&owait.wait.task_list);
3c11ecf4 1965 need_to_kill = true;
79dfdacc
MH
1966 mem_cgroup_mark_under_oom(mem);
1967
867578cb 1968 /* At first, try to OOM-lock the hierarchy under mem. */
1af8efe9 1969 spin_lock(&memcg_oom_lock);
867578cb
KH
1970 locked = mem_cgroup_oom_lock(mem);
1971 /*
1972 * Even if signal_pending(), we can't quit charge() loop without
1973 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1974 * under OOM is always welcome, so use TASK_KILLABLE here.
1975 */
3c11ecf4
KH
1976 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1977 if (!locked || mem->oom_kill_disable)
1978 need_to_kill = false;
1979 if (locked)
9490ff27 1980 mem_cgroup_oom_notify(mem);
1af8efe9 1981 spin_unlock(&memcg_oom_lock);
867578cb 1982
3c11ecf4
KH
1983 if (need_to_kill) {
1984 finish_wait(&memcg_oom_waitq, &owait.wait);
867578cb 1985 mem_cgroup_out_of_memory(mem, mask);
3c11ecf4 1986 } else {
867578cb 1987 schedule();
dc98df5a 1988 finish_wait(&memcg_oom_waitq, &owait.wait);
867578cb 1989 }
1af8efe9 1990 spin_lock(&memcg_oom_lock);
79dfdacc
MH
1991 if (locked)
1992 mem_cgroup_oom_unlock(mem);
dc98df5a 1993 memcg_wakeup_oom(mem);
1af8efe9 1994 spin_unlock(&memcg_oom_lock);
867578cb 1995
79dfdacc
MH
1996 mem_cgroup_unmark_under_oom(mem);
1997
867578cb
KH
1998 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1999 return false;
2000 /* Give a chance to the dying process */
2001 schedule_timeout(1);
2002 return true;
0b7f569e
KH
2003}
2004
d69b042f
BS
2005/*
2006 * Currently used to update mapped file statistics, but the routine can be
2007 * generalized to update other statistics as well.
32047e2a
KH
2008 *
2009 * Notes: Race condition
2010 *
2011 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
2012 * it tends to be costly. Under some conditions, though, we don't need
2013 * to do so _always_.
2014 *
2015 * Considering "charge", lock_page_cgroup() is not required because all
2016 * file-stat operations happen after a page is attached to the radix-tree. There
2017 * is no race with "charge".
2018 *
2019 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
2020 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
2021 * if there is a race with "uncharge". Statistics itself is properly handled
2022 * by flags.
2023 *
2024 * Considering "move", this is the only case where we see a race. To make the
2025 * race small, we check the MEM_CGROUP_ON_MOVE percpu value and detect whether
2026 * there is a possibility of a race condition. If there is, we take a lock.
d69b042f 2027 */
26174efd 2028
2a7106f2
GT
2029void mem_cgroup_update_page_stat(struct page *page,
2030 enum mem_cgroup_page_stat_item idx, int val)
d69b042f
BS
2031{
2032 struct mem_cgroup *mem;
32047e2a
KH
2033 struct page_cgroup *pc = lookup_page_cgroup(page);
2034 bool need_unlock = false;
dbd4ea78 2035 unsigned long uninitialized_var(flags);
d69b042f 2036
d69b042f
BS
2037 if (unlikely(!pc))
2038 return;
2039
32047e2a 2040 rcu_read_lock();
d69b042f 2041 mem = pc->mem_cgroup;
32047e2a
KH
2042 if (unlikely(!mem || !PageCgroupUsed(pc)))
2043 goto out;
2044 /* pc->mem_cgroup is unstable ? */
ca3e0214 2045 if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
32047e2a 2046 /* take a lock against to access pc->mem_cgroup */
dbd4ea78 2047 move_lock_page_cgroup(pc, &flags);
32047e2a
KH
2048 need_unlock = true;
2049 mem = pc->mem_cgroup;
2050 if (!mem || !PageCgroupUsed(pc))
2051 goto out;
2052 }
26174efd 2053
26174efd 2054 switch (idx) {
2a7106f2 2055 case MEMCG_NR_FILE_MAPPED:
26174efd
KH
2056 if (val > 0)
2057 SetPageCgroupFileMapped(pc);
2058 else if (!page_mapped(page))
0c270f8f 2059 ClearPageCgroupFileMapped(pc);
2a7106f2 2060 idx = MEM_CGROUP_STAT_FILE_MAPPED;
26174efd
KH
2061 break;
2062 default:
2063 BUG();
8725d541 2064 }
d69b042f 2065
2a7106f2
GT
2066 this_cpu_add(mem->stat->count[idx], val);
2067
32047e2a
KH
2068out:
2069 if (unlikely(need_unlock))
dbd4ea78 2070 move_unlock_page_cgroup(pc, &flags);
32047e2a
KH
2071 rcu_read_unlock();
2072 return;
d69b042f 2073}
2a7106f2 2074EXPORT_SYMBOL(mem_cgroup_update_page_stat);
26174efd 2075
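/*
 * Illustrative sketch (editor's addition): how an rmap-side caller keeps
 * the FILE_MAPPED counter in sync via mem_cgroup_update_page_stat(). Only
 * mem_cgroup_update_page_stat() and MEMCG_NR_FILE_MAPPED are from this
 * code; the wrapper below is hypothetical.
 */
#if 0	/* example only */
static inline void example_account_file_mapped(struct page *page, bool mapped)
{
	/* +1 when a file page gains its first mapping, -1 when it loses it */
	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, mapped ? 1 : -1);
}
#endif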
cdec2e42
KH
2076/*
2077 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
2078 * TODO: maybe necessary to use big numbers on big iron.
2079 */
7ec99d62 2080#define CHARGE_BATCH 32U
cdec2e42
KH
2081struct memcg_stock_pcp {
2082 struct mem_cgroup *cached; /* this is never the root cgroup */
11c9ea4e 2083 unsigned int nr_pages;
cdec2e42 2084 struct work_struct work;
26fe6168
KH
2085 unsigned long flags;
2086#define FLUSHING_CACHED_CHARGE (0)
cdec2e42
KH
2087};
2088static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
26fe6168 2089static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42
KH
2090
2091/*
11c9ea4e 2092 * Try to consume stocked charge on this cpu. If successful, one page is consumed
cdec2e42
KH
2093 * from the local stock and true is returned. If the stock is 0 or holds charges
2094 * from a cgroup which is not the current target, false is returned. This stock
2095 * will be refilled.
2096 */
2097static bool consume_stock(struct mem_cgroup *mem)
2098{
2099 struct memcg_stock_pcp *stock;
2100 bool ret = true;
2101
2102 stock = &get_cpu_var(memcg_stock);
11c9ea4e
JW
2103 if (mem == stock->cached && stock->nr_pages)
2104 stock->nr_pages--;
cdec2e42
KH
2105 else /* need to call res_counter_charge */
2106 ret = false;
2107 put_cpu_var(memcg_stock);
2108 return ret;
2109}
2110
2111/*
2112 * Return stocks cached in the percpu area to the res_counter and reset the cached information.
2113 */
2114static void drain_stock(struct memcg_stock_pcp *stock)
2115{
2116 struct mem_cgroup *old = stock->cached;
2117
11c9ea4e
JW
2118 if (stock->nr_pages) {
2119 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2120
2121 res_counter_uncharge(&old->res, bytes);
cdec2e42 2122 if (do_swap_account)
11c9ea4e
JW
2123 res_counter_uncharge(&old->memsw, bytes);
2124 stock->nr_pages = 0;
cdec2e42
KH
2125 }
2126 stock->cached = NULL;
cdec2e42
KH
2127}
2128
2129/*
2130 * This must be called under preempt disabled or must be called by
2131 * a thread which is pinned to local cpu.
2132 */
2133static void drain_local_stock(struct work_struct *dummy)
2134{
2135 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2136 drain_stock(stock);
26fe6168 2137 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
cdec2e42
KH
2138}
2139
2140/*
2141 * Cache charges (nr_pages), which come from the res_counter, in the local per_cpu area.
320cc51d 2142 * These will be consumed by the consume_stock() function later.
cdec2e42 2143 */
11c9ea4e 2144static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
cdec2e42
KH
2145{
2146 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2147
2148 if (stock->cached != mem) { /* reset if necessary */
2149 drain_stock(stock);
2150 stock->cached = mem;
2151 }
11c9ea4e 2152 stock->nr_pages += nr_pages;
cdec2e42
KH
2153 put_cpu_var(memcg_stock);
2154}
2155
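/*
 * Illustrative sketch (editor's addition): the stock fast path as the
 * charge code later in this file uses it -- consume from the per-cpu
 * stock, fall back to the res_counter, and stock the surplus of a batch.
 * The helper is hypothetical and ignores memsw for brevity.
 */
#if 0	/* example only */
static int example_charge_one_page(struct mem_cgroup *mem)
{
	struct res_counter *fail;

	if (consume_stock(mem))		/* fast path: no res_counter ops */
		return 0;
	/* slow path: charge a whole batch at once ... */
	if (res_counter_charge(&mem->res, CHARGE_BATCH * PAGE_SIZE, &fail))
		return -ENOMEM;
	/* ... and keep the unused batch-1 pages cached on this cpu */
	refill_stock(mem, CHARGE_BATCH - 1);
	return 0;
}
#endif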
2156/*
2157 * Tries to drain stocked charges on other cpus. This function is asynchronous
2158 * and just puts a work item per cpu for draining locally on each cpu. Callers
2159 * can expect some charges to be returned to the res_counter later, but cannot
2160 * wait for that.
2161 */
26fe6168 2162static void drain_all_stock_async(struct mem_cgroup *root_mem)
cdec2e42 2163{
26fe6168
KH
2164 int cpu, curcpu;
2165 /*
2166 * If someone calls draining, avoid adding more kworker runs.
cdec2e42 2167 */
26fe6168 2168 if (!mutex_trylock(&percpu_charge_mutex))
cdec2e42
KH
2169 return;
2170 /* Notify other cpus that system-wide "drain" is running */
cdec2e42 2171 get_online_cpus();
26fe6168
KH
2172 /*
2173 * Get a hint for avoiding draining charges on the current cpu,
2174 * which must be exhausted by our charging. It is not required that
2175 * this be a precise check, so we use raw_smp_processor_id() instead of
2176 * getcpu()/putcpu().
2177 */
2178 curcpu = raw_smp_processor_id();
cdec2e42
KH
2179 for_each_online_cpu(cpu) {
2180 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
26fe6168
KH
2181 struct mem_cgroup *mem;
2182
2183 if (cpu == curcpu)
2184 continue;
2185
2186 mem = stock->cached;
2187 if (!mem)
2188 continue;
2189 if (mem != root_mem) {
2190 if (!root_mem->use_hierarchy)
2191 continue;
2192 /* check whether "mem" is under tree of "root_mem" */
2193 if (!css_is_ancestor(&mem->css, &root_mem->css))
2194 continue;
2195 }
2196 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2197 schedule_work_on(cpu, &stock->work);
cdec2e42
KH
2198 }
2199 put_online_cpus();
26fe6168 2200 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2201 /* We don't wait for flush_work */
2202}
2203
2204/* This is a synchronous drain interface. */
2205static void drain_all_stock_sync(void)
2206{
2207 /* called when force_empty is called */
26fe6168 2208 mutex_lock(&percpu_charge_mutex);
cdec2e42 2209 schedule_on_each_cpu(drain_local_stock);
26fe6168 2210 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2211}
2212
711d3d2c
KH
2213/*
2214 * This function drains percpu counter value from DEAD cpu and
2215 * move it to local cpu. Note that this function can be preempted.
2216 */
2217static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
2218{
2219 int i;
2220
2221 spin_lock(&mem->pcp_counter_lock);
2222 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
7a159cc9 2223 long x = per_cpu(mem->stat->count[i], cpu);
711d3d2c
KH
2224
2225 per_cpu(mem->stat->count[i], cpu) = 0;
2226 mem->nocpu_base.count[i] += x;
2227 }
e9f8974f
JW
2228 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2229 unsigned long x = per_cpu(mem->stat->events[i], cpu);
2230
2231 per_cpu(mem->stat->events[i], cpu) = 0;
2232 mem->nocpu_base.events[i] += x;
2233 }
1489ebad
KH
2234 /* need to clear ON_MOVE value, works as a kind of lock. */
2235 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2236 spin_unlock(&mem->pcp_counter_lock);
2237}
2238
2239static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
2240{
2241 int idx = MEM_CGROUP_ON_MOVE;
2242
2243 spin_lock(&mem->pcp_counter_lock);
2244 per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
711d3d2c
KH
2245 spin_unlock(&mem->pcp_counter_lock);
2246}
2247
2248static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
cdec2e42
KH
2249 unsigned long action,
2250 void *hcpu)
2251{
2252 int cpu = (unsigned long)hcpu;
2253 struct memcg_stock_pcp *stock;
711d3d2c 2254 struct mem_cgroup *iter;
cdec2e42 2255
1489ebad
KH
2256 if (action == CPU_ONLINE) {
2257 for_each_mem_cgroup_all(iter)
2258 synchronize_mem_cgroup_on_move(iter, cpu);
2259 return NOTIFY_OK;
2260 }
2261
711d3d2c 2262 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
cdec2e42 2263 return NOTIFY_OK;
711d3d2c
KH
2264
2265 for_each_mem_cgroup_all(iter)
2266 mem_cgroup_drain_pcp_counter(iter, cpu);
2267
cdec2e42
KH
2268 stock = &per_cpu(memcg_stock, cpu);
2269 drain_stock(stock);
2270 return NOTIFY_OK;
2271}
2272
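/*
 * Illustrative note (editor's addition): this notifier is expected to be
 * registered once at init time with the hotcpu_notifier() helper of this
 * kernel generation, e.g.:
 *
 *	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 *
 * The actual registration site is outside this excerpt.
 */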
4b534334
KH
2273
2274/* See __mem_cgroup_try_charge() for details */
2275enum {
2276 CHARGE_OK, /* success */
2277 CHARGE_RETRY, /* need to retry but retry is not bad */
2278 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
2279 CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough res. */
2280 CHARGE_OOM_DIE, /* the current task is killed because of OOM */
2281};
2282
7ec99d62
JW
2283static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
2284 unsigned int nr_pages, bool oom_check)
4b534334 2285{
7ec99d62 2286 unsigned long csize = nr_pages * PAGE_SIZE;
4b534334
KH
2287 struct mem_cgroup *mem_over_limit;
2288 struct res_counter *fail_res;
2289 unsigned long flags = 0;
2290 int ret;
2291
2292 ret = res_counter_charge(&mem->res, csize, &fail_res);
2293
2294 if (likely(!ret)) {
2295 if (!do_swap_account)
2296 return CHARGE_OK;
2297 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
2298 if (likely(!ret))
2299 return CHARGE_OK;
2300
01c88e2d 2301 res_counter_uncharge(&mem->res, csize);
4b534334
KH
2302 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2303 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2304 } else
2305 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
9221edb7 2306 /*
7ec99d62
JW
2307 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2308 * of regular pages (CHARGE_BATCH), or a single regular page (1).
9221edb7
JW
2309 *
2310 * Never reclaim on behalf of optional batching, retry with a
2311 * single page instead.
2312 */
7ec99d62 2313 if (nr_pages == CHARGE_BATCH)
4b534334
KH
2314 return CHARGE_RETRY;
2315
2316 if (!(gfp_mask & __GFP_WAIT))
2317 return CHARGE_WOULDBLOCK;
2318
2319 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
0ae5e89c 2320 gfp_mask, flags, NULL);
7ec99d62 2321 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
19942822 2322 return CHARGE_RETRY;
4b534334 2323 /*
19942822
JW
2324 * Even though the limit is exceeded at this point, reclaim
2325 * may have been able to free some pages. Retry the charge
2326 * before killing the task.
2327 *
2328 * Only for regular pages, though: huge pages are rather
2329 * unlikely to succeed so close to the limit, and we fall back
2330 * to regular pages anyway in case of failure.
4b534334 2331 */
7ec99d62 2332 if (nr_pages == 1 && ret)
4b534334
KH
2333 return CHARGE_RETRY;
2334
2335 /*
2336 * At task move, charge accounts can be doubly counted. So, it's
2337 * better to wait until the end of task_move if something is going on.
2338 */
2339 if (mem_cgroup_wait_acct_move(mem_over_limit))
2340 return CHARGE_RETRY;
2341
2342 /* If we don't need to call the oom-killer at all, return immediately */
2343 if (!oom_check)
2344 return CHARGE_NOMEM;
2345 /* check OOM */
2346 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2347 return CHARGE_OOM_DIE;
2348
2349 return CHARGE_RETRY;
2350}
2351
f817ed48
KH
2352/*
2353 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2354 * the oom-killer can be invoked.
8a9f3ccd 2355 */
f817ed48 2356static int __mem_cgroup_try_charge(struct mm_struct *mm,
ec168510 2357 gfp_t gfp_mask,
7ec99d62
JW
2358 unsigned int nr_pages,
2359 struct mem_cgroup **memcg,
2360 bool oom)
8a9f3ccd 2361{
7ec99d62 2362 unsigned int batch = max(CHARGE_BATCH, nr_pages);
4b534334
KH
2363 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2364 struct mem_cgroup *mem = NULL;
2365 int ret;
a636b327 2366
867578cb
KH
2367 /*
2368 * Unlike the global VM's OOM kill, we're not in a system-level
2369 * memory shortage. So, allow dying processes to proceed, in addition
2370 * to MEMDIE processes.
2371 */
2372 if (unlikely(test_thread_flag(TIF_MEMDIE)
2373 || fatal_signal_pending(current)))
2374 goto bypass;
a636b327 2375
8a9f3ccd 2376 /*
3be91277
HD
2377 * We always charge the cgroup the mm_struct belongs to.
2378 * The mm_struct's mem_cgroup changes on task migration if the
8a9f3ccd
BS
2379 * thread group leader migrates. It's possible that mm is not
2380 * set, if so charge the init_mm (happens for pagecache usage).
2381 */
f75ca962
KH
2382 if (!*memcg && !mm)
2383 goto bypass;
2384again:
2385 if (*memcg) { /* css should be a valid one */
4b534334 2386 mem = *memcg;
f75ca962
KH
2387 VM_BUG_ON(css_is_removed(&mem->css));
2388 if (mem_cgroup_is_root(mem))
2389 goto done;
7ec99d62 2390 if (nr_pages == 1 && consume_stock(mem))
f75ca962 2391 goto done;
4b534334
KH
2392 css_get(&mem->css);
2393 } else {
f75ca962 2394 struct task_struct *p;
54595fe2 2395
f75ca962
KH
2396 rcu_read_lock();
2397 p = rcu_dereference(mm->owner);
f75ca962 2398 /*
ebb76ce1
KH
2399 * Because we don't have task_lock(), "p" can exit.
2400 * In that case, "mem" can point to root, or p can be NULL due to a
2401 * race with swapoff. Then, we have a small risk of mis-accounting.
2402 * But that kind of mis-accounting by race always happens because
2403 * we don't have cgroup_mutex(). Avoiding it is overkill, so we allow
2404 * that small race here.
2405 * (*) swapoff et al. will charge against the mm-struct, not against
2406 * the task-struct. So, mm->owner can be NULL.
f75ca962
KH
2407 */
2408 mem = mem_cgroup_from_task(p);
ebb76ce1 2409 if (!mem || mem_cgroup_is_root(mem)) {
f75ca962
KH
2410 rcu_read_unlock();
2411 goto done;
2412 }
7ec99d62 2413 if (nr_pages == 1 && consume_stock(mem)) {
f75ca962
KH
2414 /*
2415 * It seems dangerous to access memcg without css_get().
2416 * But considering how consume_stock works, it's not
2417 * necessary. If consume_stock succeeds, some charges
2418 * from this memcg are cached on this cpu. So, we
2419 * don't need to call css_get()/css_tryget() before
2420 * calling consume_stock().
2421 */
2422 rcu_read_unlock();
2423 goto done;
2424 }
2425 /* after here, we may be blocked. we need to get refcnt */
2426 if (!css_tryget(&mem->css)) {
2427 rcu_read_unlock();
2428 goto again;
2429 }
2430 rcu_read_unlock();
2431 }
8a9f3ccd 2432
4b534334
KH
2433 do {
2434 bool oom_check;
7a81b88c 2435
4b534334 2436 /* If killed, bypass charge */
f75ca962
KH
2437 if (fatal_signal_pending(current)) {
2438 css_put(&mem->css);
4b534334 2439 goto bypass;
f75ca962 2440 }
6d61ef40 2441
4b534334
KH
2442 oom_check = false;
2443 if (oom && !nr_oom_retries) {
2444 oom_check = true;
2445 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
cdec2e42 2446 }
66e1707b 2447
7ec99d62 2448 ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
4b534334
KH
2449 switch (ret) {
2450 case CHARGE_OK:
2451 break;
2452 case CHARGE_RETRY: /* not in OOM situation but retry */
7ec99d62 2453 batch = nr_pages;
f75ca962
KH
2454 css_put(&mem->css);
2455 mem = NULL;
2456 goto again;
4b534334 2457 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
f75ca962 2458 css_put(&mem->css);
4b534334
KH
2459 goto nomem;
2460 case CHARGE_NOMEM: /* OOM routine works */
f75ca962
KH
2461 if (!oom) {
2462 css_put(&mem->css);
867578cb 2463 goto nomem;
f75ca962 2464 }
4b534334
KH
2465 /* If oom, we never return -ENOMEM */
2466 nr_oom_retries--;
2467 break;
2468 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
f75ca962 2469 css_put(&mem->css);
867578cb 2470 goto bypass;
66e1707b 2471 }
4b534334
KH
2472 } while (ret != CHARGE_OK);
2473
7ec99d62
JW
2474 if (batch > nr_pages)
2475 refill_stock(mem, batch - nr_pages);
f75ca962 2476 css_put(&mem->css);
0c3e73e8 2477done:
f75ca962 2478 *memcg = mem;
7a81b88c
KH
2479 return 0;
2480nomem:
f75ca962 2481 *memcg = NULL;
7a81b88c 2482 return -ENOMEM;
867578cb
KH
2483bypass:
2484 *memcg = NULL;
2485 return 0;
7a81b88c 2486}
8a9f3ccd 2487
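/*
 * Illustrative sketch (editor's addition): the try_charge/commit protocol
 * a caller follows, mirroring mem_cgroup_charge_common() further below.
 * The helper is hypothetical; the callees are from this file.
 */
#if 0	/* example only */
static int example_charge_and_commit(struct page *page, struct mm_struct *mm,
				     gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	int ret;

	ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
	if (ret || !mem)	/* failed, or bypassed (root memcg/dying task) */
		return ret;
	/* the charge is now held; bind it to the page */
	__mem_cgroup_commit_charge(mem, page, 1, pc,
				   MEM_CGROUP_CHARGE_TYPE_MAPPED);
	return 0;
}
#endif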
a3032a2c
DN
2488/*
2489 * Sometimes we have to undo a charge we got by try_charge().
2490 * This function is for that: it does the uncharge and puts the css refcnt
2491 * gotten by try_charge().
2492 */
854ffa8d 2493static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
e7018b8d 2494 unsigned int nr_pages)
a3032a2c
DN
2495{
2496 if (!mem_cgroup_is_root(mem)) {
e7018b8d
JW
2497 unsigned long bytes = nr_pages * PAGE_SIZE;
2498
2499 res_counter_uncharge(&mem->res, bytes);
a3032a2c 2500 if (do_swap_account)
e7018b8d 2501 res_counter_uncharge(&mem->memsw, bytes);
a3032a2c 2502 }
854ffa8d
DN
2503}
2504
a3b2d692
KH
2505/*
2506 * A helper function to get a mem_cgroup from an ID. Must be called under
2507 * rcu_read_lock(). The caller must check css_is_removed() or similar if
2508 * that is a concern. (Dropping a refcnt from swap can be called against a
2509 * removed memcg.)
2510 */
2511static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2512{
2513 struct cgroup_subsys_state *css;
2514
2515 /* ID 0 is unused ID */
2516 if (!id)
2517 return NULL;
2518 css = css_lookup(&mem_cgroup_subsys, id);
2519 if (!css)
2520 return NULL;
2521 return container_of(css, struct mem_cgroup, css);
2522}
2523
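/*
 * Illustrative sketch (editor's addition): the lookup-then-tryget pattern
 * that callers of mem_cgroup_lookup() use (compare
 * try_get_mem_cgroup_from_page() below). The helper is hypothetical.
 */
#if 0	/* example only */
static struct mem_cgroup *example_memcg_from_swap(swp_entry_t ent)
{
	unsigned short id = lookup_swap_cgroup(ent);
	struct mem_cgroup *mem;

	rcu_read_lock();			/* required by mem_cgroup_lookup() */
	mem = mem_cgroup_lookup(id);
	if (mem && !css_tryget(&mem->css))	/* may race with cgroup removal */
		mem = NULL;
	rcu_read_unlock();
	return mem;	/* caller does css_put(&mem->css) when done */
}
#endif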
e42d9d5d 2524struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
b5a84319 2525{
e42d9d5d 2526 struct mem_cgroup *mem = NULL;
3c776e64 2527 struct page_cgroup *pc;
a3b2d692 2528 unsigned short id;
b5a84319
KH
2529 swp_entry_t ent;
2530
3c776e64
DN
2531 VM_BUG_ON(!PageLocked(page));
2532
3c776e64 2533 pc = lookup_page_cgroup(page);
c0bd3f63 2534 lock_page_cgroup(pc);
a3b2d692 2535 if (PageCgroupUsed(pc)) {
3c776e64 2536 mem = pc->mem_cgroup;
a3b2d692
KH
2537 if (mem && !css_tryget(&mem->css))
2538 mem = NULL;
e42d9d5d 2539 } else if (PageSwapCache(page)) {
3c776e64 2540 ent.val = page_private(page);
a3b2d692
KH
2541 id = lookup_swap_cgroup(ent);
2542 rcu_read_lock();
2543 mem = mem_cgroup_lookup(id);
2544 if (mem && !css_tryget(&mem->css))
2545 mem = NULL;
2546 rcu_read_unlock();
3c776e64 2547 }
c0bd3f63 2548 unlock_page_cgroup(pc);
b5a84319
KH
2549 return mem;
2550}
2551
ca3e0214 2552static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
5564e88b 2553 struct page *page,
7ec99d62 2554 unsigned int nr_pages,
ca3e0214 2555 struct page_cgroup *pc,
7ec99d62 2556 enum charge_type ctype)
7a81b88c 2557{
ca3e0214
KH
2558 lock_page_cgroup(pc);
2559 if (unlikely(PageCgroupUsed(pc))) {
2560 unlock_page_cgroup(pc);
e7018b8d 2561 __mem_cgroup_cancel_charge(mem, nr_pages);
ca3e0214
KH
2562 return;
2563 }
2564 /*
2565 * we don't need page_cgroup_lock for tail pages, because they are not
2566 * accessed by any other context at this point.
2567 */
8a9f3ccd 2568 pc->mem_cgroup = mem;
261fb61a
KH
2569 /*
2570 * We access a page_cgroup asynchronously without lock_page_cgroup().
2571 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2572 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2573 * before USED bit, we need memory barrier here.
2574 * See mem_cgroup_add_lru_list(), etc.
2575 */
08e552c6 2576 smp_wmb();
4b3bde4c
BS
2577 switch (ctype) {
2578 case MEM_CGROUP_CHARGE_TYPE_CACHE:
2579 case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2580 SetPageCgroupCache(pc);
2581 SetPageCgroupUsed(pc);
2582 break;
2583 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2584 ClearPageCgroupCache(pc);
2585 SetPageCgroupUsed(pc);
2586 break;
2587 default:
2588 break;
2589 }
3be91277 2590
ca3e0214 2591 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
52d4b9ac 2592 unlock_page_cgroup(pc);
430e4863
KH
2593 /*
2594 * "charge_statistics" updated event counter. Then, check it.
2595 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2596 * if they exceeds softlimit.
2597 */
5564e88b 2598 memcg_check_events(mem, page);
7a81b88c 2599}
66e1707b 2600
ca3e0214
KH
2601#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2602
2603#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2604 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2605/*
2606 * Because tail pages are not marked as "used", set it. We're under
2607 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2608 */
2609void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2610{
2611 struct page_cgroup *head_pc = lookup_page_cgroup(head);
2612 struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2613 unsigned long flags;
2614
3d37c4a9
KH
2615 if (mem_cgroup_disabled())
2616 return;
ca3e0214 2617 /*
ece35ca8 2618 * We have no races with charge/uncharge but will have races with
ca3e0214
KH
2619 * page state accounting.
2620 */
2621 move_lock_page_cgroup(head_pc, &flags);
2622
2623 tail_pc->mem_cgroup = head_pc->mem_cgroup;
2624 smp_wmb(); /* see __commit_charge() */
ece35ca8
KH
2625 if (PageCgroupAcctLRU(head_pc)) {
2626 enum lru_list lru;
2627 struct mem_cgroup_per_zone *mz;
2628
2629 /*
2630 * LRU flags cannot be copied because we need to add tail
2631 * page to the LRU by a generic call, and our hook will be called.
2632 * We hold lru_lock, so reduce the counter directly.
2633 */
2634 lru = page_lru(head);
97a6c37b 2635 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
ece35ca8
KH
2636 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2637 }
ca3e0214
KH
2638 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2639 move_unlock_page_cgroup(head_pc, &flags);
2640}
2641#endif
2642
f817ed48 2643/**
de3638d9 2644 * mem_cgroup_move_account - move account of the page
5564e88b 2645 * @page: the page
7ec99d62 2646 * @nr_pages: number of regular pages (>1 for huge pages)
f817ed48
KH
2647 * @pc: page_cgroup of the page.
2648 * @from: mem_cgroup which the page is moved from.
2649 * @to: mem_cgroup which the page is moved to. @from != @to.
854ffa8d 2650 * @uncharge: whether we should call uncharge and css_put against @from.
f817ed48
KH
2651 *
2652 * The caller must confirm following.
08e552c6 2653 * - page is not on LRU (isolate_page() is useful.)
7ec99d62 2654 * - compound_lock is held when nr_pages > 1
f817ed48 2655 *
854ffa8d 2656 * This function doesn't do "charge" nor css_get to the new cgroup. That should be
25985edc 2657 * done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge is
854ffa8d
DN
2658 * true, this function does "uncharge" from old cgroup, but it doesn't if
2659 * @uncharge is false, so a caller should do "uncharge".
f817ed48 2660 */
7ec99d62
JW
2661static int mem_cgroup_move_account(struct page *page,
2662 unsigned int nr_pages,
2663 struct page_cgroup *pc,
2664 struct mem_cgroup *from,
2665 struct mem_cgroup *to,
2666 bool uncharge)
f817ed48 2667{
de3638d9
JW
2668 unsigned long flags;
2669 int ret;
987eba66 2670
f817ed48 2671 VM_BUG_ON(from == to);
5564e88b 2672 VM_BUG_ON(PageLRU(page));
de3638d9
JW
2673 /*
2674 * The page is isolated from LRU. So, collapse function
2675 * will not handle this page. But page splitting can happen.
2676 * Do this check under compound_page_lock(). The caller should
2677 * hold it.
2678 */
2679 ret = -EBUSY;
7ec99d62 2680 if (nr_pages > 1 && !PageTransHuge(page))
de3638d9
JW
2681 goto out;
2682
2683 lock_page_cgroup(pc);
2684
2685 ret = -EINVAL;
2686 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2687 goto unlock;
2688
2689 move_lock_page_cgroup(pc, &flags);
f817ed48 2690
8725d541 2691 if (PageCgroupFileMapped(pc)) {
c62b1a3b
KH
2692 /* Update mapped_file data for mem_cgroup */
2693 preempt_disable();
2694 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2695 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2696 preempt_enable();
d69b042f 2697 }
987eba66 2698 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
854ffa8d
DN
2699 if (uncharge)
2700 /* This is not "cancel", but cancel_charge does all we need. */
e7018b8d 2701 __mem_cgroup_cancel_charge(from, nr_pages);
d69b042f 2702
854ffa8d 2703 /* caller should have done css_get */
08e552c6 2704 pc->mem_cgroup = to;
987eba66 2705 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
88703267
KH
2706 /*
2707 * We charge against "to", which may not have any tasks. Then, "to"
2708 * can be under rmdir(). But in the current implementation, the callers of
4ffef5fe 2709 * this function are just force_empty() and move charge, so it's
25985edc 2710 * guaranteed that "to" is never removed. So, we don't check rmdir
4ffef5fe 2711 * status here.
88703267 2712 */
de3638d9
JW
2713 move_unlock_page_cgroup(pc, &flags);
2714 ret = 0;
2715unlock:
57f9fd7d 2716 unlock_page_cgroup(pc);
d2265e6f
KH
2717 /*
2718 * check events
2719 */
5564e88b
JW
2720 memcg_check_events(to, page);
2721 memcg_check_events(from, page);
de3638d9 2722out:
f817ed48
KH
2723 return ret;
2724}
2725
2726/*
2727 * move charges to its parent.
2728 */
2729
5564e88b
JW
2730static int mem_cgroup_move_parent(struct page *page,
2731 struct page_cgroup *pc,
f817ed48
KH
2732 struct mem_cgroup *child,
2733 gfp_t gfp_mask)
2734{
2735 struct cgroup *cg = child->css.cgroup;
2736 struct cgroup *pcg = cg->parent;
2737 struct mem_cgroup *parent;
7ec99d62 2738 unsigned int nr_pages;
4be4489f 2739 unsigned long uninitialized_var(flags);
f817ed48
KH
2740 int ret;
2741
2742 /* Is ROOT ? */
2743 if (!pcg)
2744 return -EINVAL;
2745
57f9fd7d
DN
2746 ret = -EBUSY;
2747 if (!get_page_unless_zero(page))
2748 goto out;
2749 if (isolate_lru_page(page))
2750 goto put;
52dbb905 2751
7ec99d62 2752 nr_pages = hpage_nr_pages(page);
08e552c6 2753
f817ed48 2754 parent = mem_cgroup_from_cont(pcg);
7ec99d62 2755 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
a636b327 2756 if (ret || !parent)
57f9fd7d 2757 goto put_back;
f817ed48 2758
7ec99d62 2759 if (nr_pages > 1)
987eba66
KH
2760 flags = compound_lock_irqsave(page);
2761
7ec99d62 2762 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
854ffa8d 2763 if (ret)
7ec99d62 2764 __mem_cgroup_cancel_charge(parent, nr_pages);
8dba474f 2765
7ec99d62 2766 if (nr_pages > 1)
987eba66 2767 compound_unlock_irqrestore(page, flags);
8dba474f 2768put_back:
08e552c6 2769 putback_lru_page(page);
57f9fd7d 2770put:
40d58138 2771 put_page(page);
57f9fd7d 2772out:
f817ed48
KH
2773 return ret;
2774}
2775
7a81b88c
KH
2776/*
2777 * Charge the memory controller for page usage.
2778 * Return
2779 * 0 if the charge was successful
2780 * < 0 if the cgroup is over its limit
2781 */
2782static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
73045c47 2783 gfp_t gfp_mask, enum charge_type ctype)
7a81b88c 2784{
73045c47 2785 struct mem_cgroup *mem = NULL;
7ec99d62 2786 unsigned int nr_pages = 1;
7a81b88c 2787 struct page_cgroup *pc;
8493ae43 2788 bool oom = true;
7a81b88c 2789 int ret;
ec168510 2790
37c2ac78 2791 if (PageTransHuge(page)) {
7ec99d62 2792 nr_pages <<= compound_order(page);
37c2ac78 2793 VM_BUG_ON(!PageTransHuge(page));
8493ae43
JW
2794 /*
2795 * Never OOM-kill a process for a huge page. The
2796 * fault handler will fall back to regular pages.
2797 */
2798 oom = false;
37c2ac78 2799 }
7a81b88c
KH
2800
2801 pc = lookup_page_cgroup(page);
af4a6621 2802 BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
7a81b88c 2803
7ec99d62 2804 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
a636b327 2805 if (ret || !mem)
7a81b88c
KH
2806 return ret;
2807
7ec99d62 2808 __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
8a9f3ccd 2809 return 0;
8a9f3ccd
BS
2810}
2811
7a81b88c
KH
2812int mem_cgroup_newpage_charge(struct page *page,
2813 struct mm_struct *mm, gfp_t gfp_mask)
217bc319 2814{
f8d66542 2815 if (mem_cgroup_disabled())
cede86ac 2816 return 0;
69029cd5
KH
2817 /*
2818 * If already mapped, we don't have to account.
2819 * If page cache, page->mapping has address_space.
2820 * But page->mapping may hold an out-of-use anon_vma pointer;
2821 * detect it by the PageAnon() check. A newly-mapped-anon's page->mapping
2822 * is NULL.
2823 */
2824 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2825 return 0;
2826 if (unlikely(!mm))
2827 mm = &init_mm;
217bc319 2828 return mem_cgroup_charge_common(page, mm, gfp_mask,
73045c47 2829 MEM_CGROUP_CHARGE_TYPE_MAPPED);
217bc319
KH
2830}
2831
83aae4c7
DN
2832static void
2833__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2834 enum charge_type ctype);
2835
5a6475a4
KH
2836static void
2837__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
2838 enum charge_type ctype)
2839{
2840 struct page_cgroup *pc = lookup_page_cgroup(page);
2841 /*
2842 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
2843 * is already on LRU. It means the page may on some other page_cgroup's
2844 * LRU. Take care of it.
2845 */
2846 mem_cgroup_lru_del_before_commit(page);
2847 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
2848 mem_cgroup_lru_add_after_commit(page);
2849 return;
2850}
2851
e1a1cd59
BS
2852int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2853 gfp_t gfp_mask)
8697d331 2854{
5a6475a4 2855 struct mem_cgroup *mem = NULL;
b5a84319
KH
2856 int ret;
2857
f8d66542 2858 if (mem_cgroup_disabled())
cede86ac 2859 return 0;
52d4b9ac
KH
2860 if (PageCompound(page))
2861 return 0;
accf163e
KH
2862 /*
2863 * Corner case handling. This is usually called from
2864 * add_to_page_cache(). But some FS (shmem) precharge this page before
2865 * calling it and call add_to_page_cache() with GFP_NOWAIT.
2866 *
2867 * For the GFP_NOWAIT case, the page may be pre-charged before calling
2868 * add_to_page_cache(). (See shmem.c.) Check it here and avoid charging
2869 * twice. (It works but has to pay a slightly larger cost.)
b5a84319
KH
2870 * And when the page is SwapCache, it should take swap information
2871 * into account. This is under lock_page() now.
accf163e
KH
2872 */
2873 if (!(gfp_mask & __GFP_WAIT)) {
2874 struct page_cgroup *pc;
2875
52d4b9ac
KH
2876 pc = lookup_page_cgroup(page);
2877 if (!pc)
2878 return 0;
2879 lock_page_cgroup(pc);
2880 if (PageCgroupUsed(pc)) {
2881 unlock_page_cgroup(pc);
accf163e
KH
2882 return 0;
2883 }
52d4b9ac 2884 unlock_page_cgroup(pc);
accf163e
KH
2885 }
2886
73045c47 2887 if (unlikely(!mm))
8697d331 2888 mm = &init_mm;
accf163e 2889
5a6475a4
KH
2890 if (page_is_file_cache(page)) {
2891 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
2892 if (ret || !mem)
2893 return ret;
b5a84319 2894
5a6475a4
KH
2895 /*
2896 * FUSE reuses pages without going through the final
2897 * put that would remove them from the LRU list, make
2898 * sure that they get relinked properly.
2899 */
2900 __mem_cgroup_commit_charge_lrucare(page, mem,
2901 MEM_CGROUP_CHARGE_TYPE_CACHE);
2902 return ret;
2903 }
83aae4c7
DN
2904 /* shmem */
2905 if (PageSwapCache(page)) {
2906 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2907 if (!ret)
2908 __mem_cgroup_commit_charge_swapin(page, mem,
2909 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2910 } else
2911 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
73045c47 2912 MEM_CGROUP_CHARGE_TYPE_SHMEM);
b5a84319 2913
b5a84319 2914 return ret;
e8589cc1
KH
2915}
2916
54595fe2
KH
2917/*
2918 * During swap-in (try_charge -> commit or cancel), the page is locked.
2919 * And when try_charge() successfully returns, one refcnt to memcg without
21ae2956 2920 * struct page_cgroup is acquired. This refcnt will be consumed by
54595fe2
KH
2921 * "commit()" or removed by "cancel()"
2922 */
8c7c6e34
KH
2923int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2924 struct page *page,
2925 gfp_t mask, struct mem_cgroup **ptr)
2926{
2927 struct mem_cgroup *mem;
54595fe2 2928 int ret;
8c7c6e34 2929
56039efa
KH
2930 *ptr = NULL;
2931
f8d66542 2932 if (mem_cgroup_disabled())
8c7c6e34
KH
2933 return 0;
2934
2935 if (!do_swap_account)
2936 goto charge_cur_mm;
8c7c6e34
KH
2937 /*
2938 * A racing thread's fault, or swapoff, may have already updated
407f9c8b
HD
2939 * the pte, and even removed page from swap cache: in those cases
2940 * do_swap_page()'s pte_same() test will fail; but there's also a
2941 * KSM case which does need to charge the page.
8c7c6e34
KH
2942 */
2943 if (!PageSwapCache(page))
407f9c8b 2944 goto charge_cur_mm;
e42d9d5d 2945 mem = try_get_mem_cgroup_from_page(page);
54595fe2
KH
2946 if (!mem)
2947 goto charge_cur_mm;
8c7c6e34 2948 *ptr = mem;
7ec99d62 2949 ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
54595fe2
KH
2950 css_put(&mem->css);
2951 return ret;
8c7c6e34
KH
2952charge_cur_mm:
2953 if (unlikely(!mm))
2954 mm = &init_mm;
7ec99d62 2955 return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
8c7c6e34
KH
2956}
2957
83aae4c7
DN
2958static void
2959__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2960 enum charge_type ctype)
7a81b88c 2961{
f8d66542 2962 if (mem_cgroup_disabled())
7a81b88c
KH
2963 return;
2964 if (!ptr)
2965 return;
88703267 2966 cgroup_exclude_rmdir(&ptr->css);
5a6475a4
KH
2967
2968 __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
8c7c6e34
KH
2969 /*
2970 * Now swap is on-memory. This means this page may be
2971 * counted both as mem and swap -- a double count.
03f3c433
KH
2972 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2973 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2974 * may call delete_from_swap_cache() before we reach here.
8c7c6e34 2975 */
03f3c433 2976 if (do_swap_account && PageSwapCache(page)) {
8c7c6e34 2977 swp_entry_t ent = {.val = page_private(page)};
a3b2d692 2978 unsigned short id;
8c7c6e34 2979 struct mem_cgroup *memcg;
a3b2d692
KH
2980
2981 id = swap_cgroup_record(ent, 0);
2982 rcu_read_lock();
2983 memcg = mem_cgroup_lookup(id);
8c7c6e34 2984 if (memcg) {
a3b2d692
KH
2985 /*
2986 * This recorded memcg can be obsolete one. So, avoid
2987 * calling css_tryget
2988 */
0c3e73e8 2989 if (!mem_cgroup_is_root(memcg))
4e649152 2990 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e8 2991 mem_cgroup_swap_statistics(memcg, false);
8c7c6e34
KH
2992 mem_cgroup_put(memcg);
2993 }
a3b2d692 2994 rcu_read_unlock();
8c7c6e34 2995 }
88703267
KH
2996 /*
2997 * At swapin, we may charge account against cgroup which has no tasks.
2998 * So, rmdir()->pre_destroy() can be called while we do this charge.
2999 * In that case, we need to call pre_destroy() again. check it here.
3000 */
3001 cgroup_release_and_wakeup_rmdir(&ptr->css);
7a81b88c
KH
3002}
3003
83aae4c7
DN
3004void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
3005{
3006 __mem_cgroup_commit_charge_swapin(page, ptr,
3007 MEM_CGROUP_CHARGE_TYPE_MAPPED);
3008}
3009
7a81b88c
KH
3010void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
3011{
f8d66542 3012 if (mem_cgroup_disabled())
7a81b88c
KH
3013 return;
3014 if (!mem)
3015 return;
e7018b8d 3016 __mem_cgroup_cancel_charge(mem, 1);
7a81b88c
KH
3017}
3018
7ec99d62
JW
3019static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
3020 unsigned int nr_pages,
3021 const enum charge_type ctype)
569b846d
KH
3022{
3023 struct memcg_batch_info *batch = NULL;
3024 bool uncharge_memsw = true;
7ec99d62 3025
569b846d
KH
3026 /* If swapout, usage of swap doesn't decrease */
3027 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3028 uncharge_memsw = false;
569b846d
KH
3029
3030 batch = &current->memcg_batch;
3031 /*
3032 * In usual, we do css_get() when we remember memcg pointer.
3033 * But in this case, we keep res->usage until end of a series of
3034 * uncharges. Then, it's ok to ignore memcg's refcnt.
3035 */
3036 if (!batch->memcg)
3037 batch->memcg = mem;
3c11ecf4
KH
3038 /*
3039 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
25985edc 3040 * In those cases, all pages freed continuously can be expected to be in
3c11ecf4
KH
3041 * the same cgroup and we have a chance to coalesce uncharges.
3042 * But we uncharge one by one if the task is killed by OOM (TIF_MEMDIE)
3043 * because we want to uncharge as soon as possible.
3044 */
3045
3046 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
3047 goto direct_uncharge;
3048
7ec99d62 3049 if (nr_pages > 1)
ec168510
AA
3050 goto direct_uncharge;
3051
569b846d
KH
3052 /*
3053 * In the typical case, batch->memcg == mem. This means we can
3054 * merge a series of uncharges into one uncharge of the res_counter.
3055 * If not, we uncharge the res_counter one by one.
3056 */
3057 if (batch->memcg != mem)
3058 goto direct_uncharge;
3059 /* remember freed charge and uncharge it later */
7ffd4ca7 3060 batch->nr_pages++;
569b846d 3061 if (uncharge_memsw)
7ffd4ca7 3062 batch->memsw_nr_pages++;
569b846d
KH
3063 return;
3064direct_uncharge:
7ec99d62 3065 res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
569b846d 3066 if (uncharge_memsw)
7ec99d62 3067 res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
3c11ecf4
KH
3068 if (unlikely(batch->memcg != mem))
3069 memcg_oom_recover(mem);
569b846d
KH
3070 return;
3071}
7a81b88c 3072
8a9f3ccd 3073/*
69029cd5 3074 * uncharge if !page_mapped(page)
8a9f3ccd 3075 */
8c7c6e34 3076static struct mem_cgroup *
69029cd5 3077__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
8a9f3ccd 3078{
8c7c6e34 3079 struct mem_cgroup *mem = NULL;
7ec99d62
JW
3080 unsigned int nr_pages = 1;
3081 struct page_cgroup *pc;
8a9f3ccd 3082
f8d66542 3083 if (mem_cgroup_disabled())
8c7c6e34 3084 return NULL;
4077960e 3085
d13d1443 3086 if (PageSwapCache(page))
8c7c6e34 3087 return NULL;
d13d1443 3088
37c2ac78 3089 if (PageTransHuge(page)) {
7ec99d62 3090 nr_pages <<= compound_order(page);
37c2ac78
AA
3091 VM_BUG_ON(!PageTransHuge(page));
3092 }
8697d331 3093 /*
3c541e14 3094 * Check if our page_cgroup is valid
8697d331 3095 */
52d4b9ac
KH
3096 pc = lookup_page_cgroup(page);
3097 if (unlikely(!pc || !PageCgroupUsed(pc)))
8c7c6e34 3098 return NULL;
b9c565d5 3099
52d4b9ac 3100 lock_page_cgroup(pc);
d13d1443 3101
8c7c6e34
KH
3102 mem = pc->mem_cgroup;
3103
d13d1443
KH
3104 if (!PageCgroupUsed(pc))
3105 goto unlock_out;
3106
3107 switch (ctype) {
3108 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
8a9478ca 3109 case MEM_CGROUP_CHARGE_TYPE_DROP:
ac39cf8c
AM
3110 /* See mem_cgroup_prepare_migration() */
3111 if (page_mapped(page) || PageCgroupMigration(pc))
d13d1443
KH
3112 goto unlock_out;
3113 break;
3114 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
3115 if (!PageAnon(page)) { /* Shared memory */
3116 if (page->mapping && !page_is_file_cache(page))
3117 goto unlock_out;
3118 } else if (page_mapped(page)) /* Anon */
3119 goto unlock_out;
3120 break;
3121 default:
3122 break;
52d4b9ac 3123 }
d13d1443 3124
7ec99d62 3125 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
04046e1a 3126
52d4b9ac 3127 ClearPageCgroupUsed(pc);
544122e5
KH
3128 /*
3129 * pc->mem_cgroup is not cleared here. It will be accessed when it's
3130 * freed from the LRU. This is safe because an uncharged page is expected
3131 * not to be reused (it is freed soon). The exception is SwapCache; it is
3132 * handled by special functions.
3133 */
b9c565d5 3134
52d4b9ac 3135 unlock_page_cgroup(pc);
f75ca962
KH
3136 /*
3137 * even after unlock, we have mem->res.usage here and this memcg
3138 * will never be freed.
3139 */
d2265e6f 3140 memcg_check_events(mem, page);
f75ca962
KH
3141 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
3142 mem_cgroup_swap_statistics(mem, true);
3143 mem_cgroup_get(mem);
3144 }
3145 if (!mem_cgroup_is_root(mem))
7ec99d62 3146 mem_cgroup_do_uncharge(mem, nr_pages, ctype);
6d12e2d8 3147
8c7c6e34 3148 return mem;
d13d1443
KH
3149
3150unlock_out:
3151 unlock_page_cgroup(pc);
8c7c6e34 3152 return NULL;
3c541e14
BS
3153}
3154
69029cd5
KH
3155void mem_cgroup_uncharge_page(struct page *page)
3156{
52d4b9ac
KH
3157 /* early check. */
3158 if (page_mapped(page))
3159 return;
3160 if (page->mapping && !PageAnon(page))
3161 return;
69029cd5
KH
3162 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3163}
3164
3165void mem_cgroup_uncharge_cache_page(struct page *page)
3166{
3167 VM_BUG_ON(page_mapped(page));
b7abea96 3168 VM_BUG_ON(page->mapping);
69029cd5
KH
3169 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3170}
3171
569b846d
KH
3172/*
3173 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3174 * In those cases, pages are freed continuously and we can expect the pages
3175 * to be in the same memcg. Each of those calls itself limits the number of
3176 * pages freed at once, so uncharge_start/end() is called properly.
3177 * This may be called several (nested) times in one context,
3178 */
3179
3180void mem_cgroup_uncharge_start(void)
3181{
3182 current->memcg_batch.do_batch++;
3183 /* We can do nest. */
3184 if (current->memcg_batch.do_batch == 1) {
3185 current->memcg_batch.memcg = NULL;
7ffd4ca7
JW
3186 current->memcg_batch.nr_pages = 0;
3187 current->memcg_batch.memsw_nr_pages = 0;
569b846d
KH
3188 }
3189}
3190
3191void mem_cgroup_uncharge_end(void)
3192{
3193 struct memcg_batch_info *batch = &current->memcg_batch;
3194
3195 if (!batch->do_batch)
3196 return;
3197
3198 batch->do_batch--;
3199 if (batch->do_batch) /* If stacked, do nothing. */
3200 return;
3201
3202 if (!batch->memcg)
3203 return;
3204 /*
3205 * This "batch->memcg" is valid without any css_get/put etc...
3206 * because we hide charges behind us.
3207 */
7ffd4ca7
JW
3208 if (batch->nr_pages)
3209 res_counter_uncharge(&batch->memcg->res,
3210 batch->nr_pages * PAGE_SIZE);
3211 if (batch->memsw_nr_pages)
3212 res_counter_uncharge(&batch->memcg->memsw,
3213 batch->memsw_nr_pages * PAGE_SIZE);
3c11ecf4 3214 memcg_oom_recover(batch->memcg);
569b846d
KH
3215 /* forget this pointer (for sanity check) */
3216 batch->memcg = NULL;
3217}
3218
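/*
 * Illustrative sketch (editor's addition): how a truncation-style caller
 * brackets a run of page frees with the batching API above, so the
 * res_counter is uncharged once instead of per page. The helper and its
 * arguments are hypothetical; the memcg calls are from this file.
 */
#if 0	/* example only */
static void example_truncate_batch(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();	/* begin coalescing uncharges */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_cache_page(pages[i]);
	mem_cgroup_uncharge_end();	/* flush the batched charge once */
}
#endif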
e767e056 3219#ifdef CONFIG_SWAP
8c7c6e34 3220/*
e767e056 3221 * called after __delete_from_swap_cache() and after dropping the "page" account.
8c7c6e34
KH
3222 * memcg information is recorded to swap_cgroup of "ent"
3223 */
8a9478ca
KH
3224void
3225mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
8c7c6e34
KH
3226{
3227 struct mem_cgroup *memcg;
8a9478ca
KH
3228 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3229
3230 if (!swapout) /* this was a swap cache but the swap is unused ! */
3231 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3232
3233 memcg = __mem_cgroup_uncharge_common(page, ctype);
8c7c6e34 3234
f75ca962
KH
3235 /*
3236 * record memcg information, if swapout && memcg != NULL,
3237 * mem_cgroup_get() was called in uncharge().
3238 */
3239 if (do_swap_account && swapout && memcg)
a3b2d692 3240 swap_cgroup_record(ent, css_id(&memcg->css));
8c7c6e34 3241}
e767e056 3242#endif
8c7c6e34
KH
3243
3244#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3245/*
3246 * Called from swap_entry_free(). Remove the record in swap_cgroup and
3247 * uncharge the "memsw" account.
3248 */
3249void mem_cgroup_uncharge_swap(swp_entry_t ent)
d13d1443 3250{
8c7c6e34 3251 struct mem_cgroup *memcg;
a3b2d692 3252 unsigned short id;
8c7c6e34
KH
3253
3254 if (!do_swap_account)
3255 return;
3256
a3b2d692
KH
3257 id = swap_cgroup_record(ent, 0);
3258 rcu_read_lock();
3259 memcg = mem_cgroup_lookup(id);
8c7c6e34 3260 if (memcg) {
a3b2d692
KH
3261 /*
3262 * We uncharge this because swap is freed.
3263 * This memcg can be obsolete one. We avoid calling css_tryget
3264 */
0c3e73e8 3265 if (!mem_cgroup_is_root(memcg))
4e649152 3266 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e8 3267 mem_cgroup_swap_statistics(memcg, false);
8c7c6e34
KH
3268 mem_cgroup_put(memcg);
3269 }
a3b2d692 3270 rcu_read_unlock();
d13d1443 3271}
02491447
DN
3272
3273/**
3274 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3275 * @entry: swap entry to be moved
3276 * @from: mem_cgroup which the entry is moved from
3277 * @to: mem_cgroup which the entry is moved to
483c30b5 3278 * @need_fixup: whether we should fixup res_counters and refcounts.
02491447
DN
3279 *
3280 * It succeeds only when the swap_cgroup's record for this entry is the same
3281 * as the mem_cgroup's id of @from.
3282 *
3283 * Returns 0 on success, -EINVAL on failure.
3284 *
3285 * The caller must have charged to @to, IOW, called res_counter_charge() about
3286 * both res and memsw, and called css_get().
3287 */
3288static int mem_cgroup_move_swap_account(swp_entry_t entry,
483c30b5 3289 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
02491447
DN
3290{
3291 unsigned short old_id, new_id;
3292
3293 old_id = css_id(&from->css);
3294 new_id = css_id(&to->css);
3295
3296 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
02491447 3297 mem_cgroup_swap_statistics(from, false);
483c30b5 3298 mem_cgroup_swap_statistics(to, true);
02491447 3299 /*
483c30b5
DN
3300 * This function is only called from task migration context now.
3301 * It postpones res_counter and refcount handling till the end
3302 * of task migration (mem_cgroup_clear_mc()) for performance
3303 * improvement. But we cannot postpone mem_cgroup_get(to)
3304 * because if the process that has been moved to @to does
3305 * swap-in, the refcount of @to might be decreased to 0.
02491447 3306 */
02491447 3307 mem_cgroup_get(to);
483c30b5
DN
3308 if (need_fixup) {
3309 if (!mem_cgroup_is_root(from))
3310 res_counter_uncharge(&from->memsw, PAGE_SIZE);
3311 mem_cgroup_put(from);
3312 /*
3313 * we charged both to->res and to->memsw, so we should
3314 * uncharge to->res.
3315 */
3316 if (!mem_cgroup_is_root(to))
3317 res_counter_uncharge(&to->res, PAGE_SIZE);
483c30b5 3318 }
02491447
DN
3319 return 0;
3320 }
3321 return -EINVAL;
3322}
3323#else
3324static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
483c30b5 3325 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
02491447
DN
3326{
3327 return -EINVAL;
3328}
8c7c6e34 3329#endif
d13d1443 3330
ae41be37 3331/*
01b1ae63
KH
3332 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3333 * page belongs to.
ae41be37 3334 */
ac39cf8c 3335int mem_cgroup_prepare_migration(struct page *page,
ef6a3c63 3336 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
ae41be37 3337{
e8589cc1 3338 struct mem_cgroup *mem = NULL;
7ec99d62 3339 struct page_cgroup *pc;
ac39cf8c 3340 enum charge_type ctype;
e8589cc1 3341 int ret = 0;
8869b8f6 3342
56039efa
KH
3343 *ptr = NULL;
3344
ec168510 3345 VM_BUG_ON(PageTransHuge(page));
f8d66542 3346 if (mem_cgroup_disabled())
4077960e
BS
3347 return 0;
3348
52d4b9ac
KH
3349 pc = lookup_page_cgroup(page);
3350 lock_page_cgroup(pc);
3351 if (PageCgroupUsed(pc)) {
e8589cc1
KH
3352 mem = pc->mem_cgroup;
3353 css_get(&mem->css);
ac39cf8c
AM
3354 /*
3355 * At migrating an anonymous page, its mapcount goes down
3356 * to 0 and uncharge() will be called. But, even if it's fully
3357 * unmapped, migration may fail and this page has to be
3358 * charged again. We set MIGRATION flag here and delay uncharge
3359 * until end_migration() is called
3360 *
3361 * Corner Case Thinking
3362 * A)
3363 * When the old page was mapped as Anon and it's unmap-and-freed
3364 * while migration was ongoing.
3365 * If unmap finds the old page, uncharge() of it will be delayed
3366 * until end_migration(). If unmap finds a new page, it's
3367 * uncharged when it make mapcount to be 1->0. If unmap code
3368 * finds swap_migration_entry, the new page will not be mapped
3369 * and end_migration() will find it(mapcount==0).
3370 *
3371 * B)
3372 * The old page was mapped but migration fails; the kernel
3373 * remaps it. A charge for it is kept by the MIGRATION flag even
3374 * if its mapcount goes down to 0, so the remap succeeds
3375 * without charging it again.
3376 *
3377 * C)
3378 * The "old" page is under lock_page() until the end of
3379 * migration, so, the old page itself will not be swapped-out.
3380 * If the new page is swapped out before end_migraton, our
3381 * hook to usual swap-out path will catch the event.
3382 */
3383 if (PageAnon(page))
3384 SetPageCgroupMigration(pc);
e8589cc1 3385 }
52d4b9ac 3386 unlock_page_cgroup(pc);
ac39cf8c
AM
3387 /*
3388 * If the page is not charged at this point,
3389 * we return here.
3390 */
3391 if (!mem)
3392 return 0;
01b1ae63 3393
93d5c9be 3394 *ptr = mem;
7ec99d62 3395 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
ac39cf8c
AM
3396 css_put(&mem->css);/* drop extra refcnt */
3397 if (ret || *ptr == NULL) {
3398 if (PageAnon(page)) {
3399 lock_page_cgroup(pc);
3400 ClearPageCgroupMigration(pc);
3401 unlock_page_cgroup(pc);
3402 /*
3403 * The old page may be fully unmapped while we kept it.
3404 */
3405 mem_cgroup_uncharge_page(page);
3406 }
3407 return -ENOMEM;
e8589cc1 3408 }
ac39cf8c
AM
3409 /*
3410 * We charge the new page before it's used/mapped, so even if unlock_page()
3411 * is called before end_migration, we can catch all events on this new
3412 * page. In case the new page is migrated but not remapped, its
3413 * mapcount will finally be 0 and we call uncharge in end_migration().
3414 */
3415 pc = lookup_page_cgroup(newpage);
3416 if (PageAnon(page))
3417 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3418 else if (page_is_file_cache(page))
3419 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3420 else
3421 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
7ec99d62 3422 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
e8589cc1 3423 return ret;
ae41be37 3424}
8869b8f6 3425
69029cd5 3426/* remove redundant charge if migration failed*/
01b1ae63 3427void mem_cgroup_end_migration(struct mem_cgroup *mem,
50de1dd9 3428 struct page *oldpage, struct page *newpage, bool migration_ok)
ae41be37 3429{
ac39cf8c 3430 struct page *used, *unused;
01b1ae63 3431 struct page_cgroup *pc;
01b1ae63
KH
3432
3433 if (!mem)
3434 return;
ac39cf8c 3435 /* blocks rmdir() */
88703267 3436 cgroup_exclude_rmdir(&mem->css);
50de1dd9 3437 if (!migration_ok) {
ac39cf8c
AM
3438 used = oldpage;
3439 unused = newpage;
01b1ae63 3440 } else {
ac39cf8c 3441 used = newpage;
01b1ae63
KH
3442 unused = oldpage;
3443 }
69029cd5 3444 /*
ac39cf8c
AM
3445 * We disallowed uncharge of pages under migration because the mapcount
3446 * of the page goes down to zero, temporarily.
3447 * Clear the flag and check whether the page should stay charged.
01b1ae63 3448 */
ac39cf8c
AM
3449 pc = lookup_page_cgroup(oldpage);
3450 lock_page_cgroup(pc);
3451 ClearPageCgroupMigration(pc);
3452 unlock_page_cgroup(pc);
01b1ae63 3453
ac39cf8c
AM
3454 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3455
01b1ae63 3456 /*
ac39cf8c
AM
3457 * If the page is file cache, the radix-tree replacement is atomic
3458 * and we can skip this check. When it was an Anon page, its mapcount
3459 * goes down to 0. But because we added the MIGRATION flag, it's not
3460 * uncharged yet. There are several cases, but the page->mapcount check
3461 * and USED bit check in mem_cgroup_uncharge_page() will do enough
3462 * check. (see prepare_charge() also)
69029cd5 3463 */
ac39cf8c
AM
3464 if (PageAnon(used))
3465 mem_cgroup_uncharge_page(used);
88703267 3466 /*
ac39cf8c
AM
3467 * At migration, we may charge against a cgroup which has no
3468 * tasks.
88703267
KH
3469 * So rmdir()->pre_destroy() can be called while we do this charge.
3470 * In that case, we need to call pre_destroy() again; check it here.
3471 */
3472 cgroup_release_and_wakeup_rmdir(&mem->css);
ae41be37 3473}
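/*
 * A sketch of the intended timeline, pieced together from the two
 * helpers above (an illustrative summary, not new behaviour):
 *
 *   mem_cgroup_prepare_migration(old, new):
 *     - if old is charged, take a css reference; for Anon pages set
 *       PCG_MIGRATION so an intermediate unmap doesn't uncharge old
 *     - pre-charge PAGE_SIZE and commit it to new before new is mapped
 *   ... migration core runs and either succeeds or fails ...
 *   mem_cgroup_end_migration(mem, old, new, migration_ok):
 *     - force-uncharge whichever page lost (new on failure, old on
 *       success), clear PCG_MIGRATION on old, and let the usual
 *       mapcount/USED checks uncharge a fully unmapped Anon page
 */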
78fb7466 3474
c9b0ed51 3475/*
ae3abae6
DN
3476 * A call to try to shrink memory usage on charge failure at shmem's swapin.
3477 * Calling hierarchical_reclaim is not enough because we should update
3478 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
3479 * Moreover, considering hierarchy, we should reclaim from the mem_over_limit,
3480 * not from the memcg which this page would be charged to.
3481 * try_charge_swapin does all of this work properly.
c9b0ed51 3482 */
ae3abae6 3483int mem_cgroup_shmem_charge_fallback(struct page *page,
b5a84319
KH
3484 struct mm_struct *mm,
3485 gfp_t gfp_mask)
c9b0ed51 3486{
56039efa 3487 struct mem_cgroup *mem;
ae3abae6 3488 int ret;
c9b0ed51 3489
f8d66542 3490 if (mem_cgroup_disabled())
cede86ac 3491 return 0;
c9b0ed51 3492
ae3abae6
DN
3493 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
3494 if (!ret)
3495 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
c9b0ed51 3496
ae3abae6 3497 return ret;
c9b0ed51
KH
3498}
3499
f212ad7c
DN
3500#ifdef CONFIG_DEBUG_VM
3501static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3502{
3503 struct page_cgroup *pc;
3504
3505 pc = lookup_page_cgroup(page);
3506 if (likely(pc) && PageCgroupUsed(pc))
3507 return pc;
3508 return NULL;
3509}
3510
3511bool mem_cgroup_bad_page_check(struct page *page)
3512{
3513 if (mem_cgroup_disabled())
3514 return false;
3515
3516 return lookup_page_cgroup_used(page) != NULL;
3517}
3518
3519void mem_cgroup_print_bad_page(struct page *page)
3520{
3521 struct page_cgroup *pc;
3522
3523 pc = lookup_page_cgroup_used(page);
3524 if (pc) {
3525 int ret = -1;
3526 char *path;
3527
3528 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3529 pc, pc->flags, pc->mem_cgroup);
3530
3531 path = kmalloc(PATH_MAX, GFP_KERNEL);
3532 if (path) {
3533 rcu_read_lock();
3534 ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3535 path, PATH_MAX);
3536 rcu_read_unlock();
3537 }
3538
3539 printk(KERN_CONT "(%s)\n",
3540 (ret < 0) ? "cannot get the path" : path);
3541 kfree(path);
3542 }
3543}
3544#endif
3545
8c7c6e34
KH
3546static DEFINE_MUTEX(set_limit_mutex);
3547
d38d2a75 3548static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
8c7c6e34 3549 unsigned long long val)
628f4235 3550{
81d39c20 3551 int retry_count;
3c11ecf4 3552 u64 memswlimit, memlimit;
628f4235 3553 int ret = 0;
81d39c20
KH
3554 int children = mem_cgroup_count_children(memcg);
3555 u64 curusage, oldusage;
3c11ecf4 3556 int enlarge;
81d39c20
KH
3557
3558 /*
3559 * For keeping hierarchical_reclaim simple, how long we should retry
3560 * depends on the caller. We set our retry count to be a function
3561 * of the number of children we should visit in this loop.
3562 */
3563 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3564
3565 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
628f4235 3566
3c11ecf4 3567 enlarge = 0;
8c7c6e34 3568 while (retry_count) {
628f4235
KH
3569 if (signal_pending(current)) {
3570 ret = -EINTR;
3571 break;
3572 }
8c7c6e34
KH
3573 /*
3574 * Rather than hiding this in some function, I do it in an
3575 * open-coded manner, so you can see what it really does.
3576 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3577 */
3578 mutex_lock(&set_limit_mutex);
3579 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3580 if (memswlimit < val) {
3581 ret = -EINVAL;
3582 mutex_unlock(&set_limit_mutex);
628f4235
KH
3583 break;
3584 }
3c11ecf4
KH
3585
3586 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3587 if (memlimit < val)
3588 enlarge = 1;
3589
8c7c6e34 3590 ret = res_counter_set_limit(&memcg->res, val);
22a668d7
KH
3591 if (!ret) {
3592 if (memswlimit == val)
3593 memcg->memsw_is_minimum = true;
3594 else
3595 memcg->memsw_is_minimum = false;
3596 }
8c7c6e34
KH
3597 mutex_unlock(&set_limit_mutex);
3598
3599 if (!ret)
3600 break;
3601
aa20d489 3602 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
0ae5e89c
YH
3603 MEM_CGROUP_RECLAIM_SHRINK,
3604 NULL);
81d39c20
KH
3605 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3606 /* Usage is reduced ? */
3607 if (curusage >= oldusage)
3608 retry_count--;
3609 else
3610 oldusage = curusage;
8c7c6e34 3611 }
3c11ecf4
KH
3612 if (!ret && enlarge)
3613 memcg_oom_recover(memcg);
14797e23 3614
8c7c6e34
KH
3615 return ret;
3616}
3617
338c8431
LZ
3618static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3619 unsigned long long val)
8c7c6e34 3620{
81d39c20 3621 int retry_count;
3c11ecf4 3622 u64 memlimit, memswlimit, oldusage, curusage;
81d39c20
KH
3623 int children = mem_cgroup_count_children(memcg);
3624 int ret = -EBUSY;
3c11ecf4 3625 int enlarge = 0;
8c7c6e34 3626
81d39c20
KH
3627 /* see mem_cgroup_resize_res_limit */
3628 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3629 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
8c7c6e34
KH
3630 while (retry_count) {
3631 if (signal_pending(current)) {
3632 ret = -EINTR;
3633 break;
3634 }
3635 /*
3636 * Rather than hiding this in some function, I do it in an
3637 * open-coded manner, so you can see what it really does.
3638 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3639 */
3640 mutex_lock(&set_limit_mutex);
3641 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3642 if (memlimit > val) {
3643 ret = -EINVAL;
3644 mutex_unlock(&set_limit_mutex);
3645 break;
3646 }
3c11ecf4
KH
3647 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3648 if (memswlimit < val)
3649 enlarge = 1;
8c7c6e34 3650 ret = res_counter_set_limit(&memcg->memsw, val);
22a668d7
KH
3651 if (!ret) {
3652 if (memlimit == val)
3653 memcg->memsw_is_minimum = true;
3654 else
3655 memcg->memsw_is_minimum = false;
3656 }
8c7c6e34
KH
3657 mutex_unlock(&set_limit_mutex);
3658
3659 if (!ret)
3660 break;
3661
4e416953 3662 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
75822b44 3663 MEM_CGROUP_RECLAIM_NOSWAP |
0ae5e89c
YH
3664 MEM_CGROUP_RECLAIM_SHRINK,
3665 NULL);
8c7c6e34 3666 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
81d39c20 3667 /* Usage is reduced ? */
8c7c6e34 3668 if (curusage >= oldusage)
628f4235 3669 retry_count--;
81d39c20
KH
3670 else
3671 oldusage = curusage;
628f4235 3672 }
3c11ecf4
KH
3673 if (!ret && enlarge)
3674 memcg_oom_recover(memcg);
628f4235
KH
3675 return ret;
3676}
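/*
 * Because the two resize helpers above enforce
 * mem->res.limit <= mem->memsw.limit, userspace has to order its
 * writes: grow memsw before mem, shrink mem before memsw. A minimal
 * userspace sketch (the v1 mount point and the group name "grp" are
 * assumptions for illustration):
 *
 *   #include <stdio.h>
 *
 *   static int set_limit(const char *file, unsigned long long bytes)
 *   {
 *           char path[128];
 *           FILE *f;
 *
 *           snprintf(path, sizeof(path),
 *                    "/sys/fs/cgroup/memory/grp/%s", file);
 *           f = fopen(path, "w");
 *           if (!f)
 *                   return -1;
 *           fprintf(f, "%llu", bytes);
 *           return fclose(f);
 *   }
 *
 *   To grow both limits, raise memsw first:
 *   set_limit("memory.memsw.limit_in_bytes", 2ULL << 30);
 *   set_limit("memory.limit_in_bytes", 1ULL << 30);
 */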
3677
4e416953 3678unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
0ae5e89c
YH
3679 gfp_t gfp_mask,
3680 unsigned long *total_scanned)
4e416953
BS
3681{
3682 unsigned long nr_reclaimed = 0;
3683 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3684 unsigned long reclaimed;
3685 int loop = 0;
3686 struct mem_cgroup_tree_per_zone *mctz;
ef8745c1 3687 unsigned long long excess;
0ae5e89c 3688 unsigned long nr_scanned;
4e416953
BS
3689
3690 if (order > 0)
3691 return 0;
3692
00918b6a 3693 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4e416953
BS
3694 /*
3695 * This loop can run for a while, especially if mem_cgroups continuously
3696 * keep exceeding their soft limit and putting the system under
3697 * pressure.
3698 */
3699 do {
3700 if (next_mz)
3701 mz = next_mz;
3702 else
3703 mz = mem_cgroup_largest_soft_limit_node(mctz);
3704 if (!mz)
3705 break;
3706
0ae5e89c 3707 nr_scanned = 0;
4e416953
BS
3708 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3709 gfp_mask,
0ae5e89c
YH
3710 MEM_CGROUP_RECLAIM_SOFT,
3711 &nr_scanned);
4e416953 3712 nr_reclaimed += reclaimed;
0ae5e89c 3713 *total_scanned += nr_scanned;
4e416953
BS
3714 spin_lock(&mctz->lock);
3715
3716 /*
3717 * If we failed to reclaim anything from this memory cgroup
3718 * it is time to move on to the next cgroup
3719 */
3720 next_mz = NULL;
3721 if (!reclaimed) {
3722 do {
3723 /*
3724 * Loop until we find yet another one.
3725 *
3726 * By the time we get the soft_limit lock
3727 * again, someone might have added the
3728 * group back on the RB tree. Iterate to
3729 * make sure we get a different mem.
3730 * mem_cgroup_largest_soft_limit_node returns
3731 * NULL if no other cgroup is present on
3732 * the tree
3733 */
3734 next_mz =
3735 __mem_cgroup_largest_soft_limit_node(mctz);
39cc98f1 3736 if (next_mz == mz)
4e416953 3737 css_put(&next_mz->mem->css);
39cc98f1 3738 else /* next_mz == NULL or other memcg */
4e416953
BS
3739 break;
3740 } while (1);
3741 }
4e416953 3742 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
ef8745c1 3743 excess = res_counter_soft_limit_excess(&mz->mem->res);
4e416953
BS
3744 /*
3745 * One school of thought says that we should not add
3746 * back the node to the tree if reclaim returns 0.
3747 * But our reclaim could return 0 simply because, due
3748 * to priority, we are exposing a smaller subset of
3749 * memory to reclaim from. Consider this as a longer
3750 * term TODO.
3751 */
ef8745c1
KH
3752 /* If excess == 0, no tree ops */
3753 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
4e416953
BS
3754 spin_unlock(&mctz->lock);
3755 css_put(&mz->mem->css);
3756 loop++;
3757 /*
3758 * Could not reclaim anything and there are no more
3759 * mem cgroups to try or we seem to be looping without
3760 * reclaiming anything.
3761 */
3762 if (!nr_reclaimed &&
3763 (next_mz == NULL ||
3764 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3765 break;
3766 } while (!nr_reclaimed);
3767 if (next_mz)
3768 css_put(&next_mz->mem->css);
3769 return nr_reclaimed;
3770}
3771
cc847582
KH
3772/*
3773 * This routine traverses the page_cgroups on the given list and drops them all.
cc847582
KH
3774 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
3775 */
f817ed48 3776static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
08e552c6 3777 int node, int zid, enum lru_list lru)
cc847582 3778{
08e552c6
KH
3779 struct zone *zone;
3780 struct mem_cgroup_per_zone *mz;
f817ed48 3781 struct page_cgroup *pc, *busy;
08e552c6 3782 unsigned long flags, loop;
072c56c1 3783 struct list_head *list;
f817ed48 3784 int ret = 0;
072c56c1 3785
08e552c6
KH
3786 zone = &NODE_DATA(node)->node_zones[zid];
3787 mz = mem_cgroup_zoneinfo(mem, node, zid);
b69408e8 3788 list = &mz->lists[lru];
cc847582 3789
f817ed48
KH
3790 loop = MEM_CGROUP_ZSTAT(mz, lru);
3791 /* give some margin against EBUSY etc...*/
3792 loop += 256;
3793 busy = NULL;
3794 while (loop--) {
5564e88b
JW
3795 struct page *page;
3796
f817ed48 3797 ret = 0;
08e552c6 3798 spin_lock_irqsave(&zone->lru_lock, flags);
f817ed48 3799 if (list_empty(list)) {
08e552c6 3800 spin_unlock_irqrestore(&zone->lru_lock, flags);
52d4b9ac 3801 break;
f817ed48
KH
3802 }
3803 pc = list_entry(list->prev, struct page_cgroup, lru);
3804 if (busy == pc) {
3805 list_move(&pc->lru, list);
648bcc77 3806 busy = NULL;
08e552c6 3807 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48
KH
3808 continue;
3809 }
08e552c6 3810 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48 3811
6b3ae58e 3812 page = lookup_cgroup_page(pc);
5564e88b
JW
3813
3814 ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
f817ed48 3815 if (ret == -ENOMEM)
52d4b9ac 3816 break;
f817ed48
KH
3817
3818 if (ret == -EBUSY || ret == -EINVAL) {
3819 /* found lock contention or "pc" is obsolete. */
3820 busy = pc;
3821 cond_resched();
3822 } else
3823 busy = NULL;
cc847582 3824 }
08e552c6 3825
f817ed48
KH
3826 if (!ret && !list_empty(list))
3827 return -EBUSY;
3828 return ret;
cc847582
KH
3829}
3830
3831/*
3832 * make mem_cgroup's charge to be 0 if there is no task.
3833 * This enables deleting this mem_cgroup.
3834 */
c1e862c1 3835static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
cc847582 3836{
f817ed48
KH
3837 int ret;
3838 int node, zid, shrink;
3839 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
c1e862c1 3840 struct cgroup *cgrp = mem->css.cgroup;
8869b8f6 3841
cc847582 3842 css_get(&mem->css);
f817ed48
KH
3843
3844 shrink = 0;
c1e862c1
KH
3845 /* should free all ? */
3846 if (free_all)
3847 goto try_to_free;
f817ed48 3848move_account:
fce66477 3849 do {
f817ed48 3850 ret = -EBUSY;
c1e862c1
KH
3851 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3852 goto out;
3853 ret = -EINTR;
3854 if (signal_pending(current))
cc847582 3855 goto out;
52d4b9ac
KH
3856 /* This is for making sure all *used* pages are on an LRU. */
3857 lru_add_drain_all();
cdec2e42 3858 drain_all_stock_sync();
f817ed48 3859 ret = 0;
32047e2a 3860 mem_cgroup_start_move(mem);
299b4eaa 3861 for_each_node_state(node, N_HIGH_MEMORY) {
f817ed48 3862 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
b69408e8 3863 enum lru_list l;
f817ed48
KH
3864 for_each_lru(l) {
3865 ret = mem_cgroup_force_empty_list(mem,
08e552c6 3866 node, zid, l);
f817ed48
KH
3867 if (ret)
3868 break;
3869 }
1ecaab2b 3870 }
f817ed48
KH
3871 if (ret)
3872 break;
3873 }
32047e2a 3874 mem_cgroup_end_move(mem);
3c11ecf4 3875 memcg_oom_recover(mem);
f817ed48
KH
3876 /* it seems parent cgroup doesn't have enough mem */
3877 if (ret == -ENOMEM)
3878 goto try_to_free;
52d4b9ac 3879 cond_resched();
fce66477
DN
3880 /* "ret" should also be checked to ensure all lists are empty. */
3881 } while (mem->res.usage > 0 || ret);
cc847582
KH
3882out:
3883 css_put(&mem->css);
3884 return ret;
f817ed48
KH
3885
3886try_to_free:
c1e862c1
KH
3887 /* returns EBUSY if there is a task or if we come here twice. */
3888 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
f817ed48
KH
3889 ret = -EBUSY;
3890 goto out;
3891 }
c1e862c1
KH
3892 /* we call try-to-free pages to make this cgroup empty */
3893 lru_add_drain_all();
f817ed48
KH
3894 /* try to free all pages in this cgroup */
3895 shrink = 1;
3896 while (nr_retries && mem->res.usage > 0) {
82f9d486 3897 struct memcg_scanrecord rec;
f817ed48 3898 int progress;
c1e862c1
KH
3899
3900 if (signal_pending(current)) {
3901 ret = -EINTR;
3902 goto out;
3903 }
82f9d486
KH
3904 rec.context = SCAN_BY_SHRINK;
3905 rec.mem = mem;
3906 rec.root = mem;
a7885eb8 3907 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
82f9d486 3908 false, &rec);
c1e862c1 3909 if (!progress) {
f817ed48 3910 nr_retries--;
c1e862c1 3911 /* maybe some writeback is necessary */
8aa7e847 3912 congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1 3913 }
f817ed48
KH
3914
3915 }
08e552c6 3916 lru_add_drain();
f817ed48 3917 /* try move_account...there may be some *locked* pages. */
fce66477 3918 goto move_account;
cc847582
KH
3919}
3920
c1e862c1
KH
3921int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3922{
3923 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3924}
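/*
 * Userspace reaches mem_cgroup_force_empty() through the
 * "force_empty" trigger registered further below; the written value
 * is ignored, only the write event matters. Sketch (the cgroup path
 * is an assumption):
 *
 *   FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.force_empty", "w");
 *   if (f) {
 *           fputs("0", f);
 *           fclose(f);
 *   }
 *
 * The write fails with -EBUSY while tasks or children remain in the
 * group.
 */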
3925
3926
18f59ea7
BS
3927static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3928{
3929 return mem_cgroup_from_cont(cont)->use_hierarchy;
3930}
3931
3932static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3933 u64 val)
3934{
3935 int retval = 0;
3936 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3937 struct cgroup *parent = cont->parent;
3938 struct mem_cgroup *parent_mem = NULL;
3939
3940 if (parent)
3941 parent_mem = mem_cgroup_from_cont(parent);
3942
3943 cgroup_lock();
3944 /*
af901ca1 3945 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7
BS
3946 * in the child subtrees. If it is unset, then the change can
3947 * occur, provided the current cgroup has no children.
3948 *
3949 * For the root cgroup, parent_mem is NULL, we allow value to be
3950 * set if there are no children.
3951 */
3952 if ((!parent_mem || !parent_mem->use_hierarchy) &&
3953 (val == 1 || val == 0)) {
3954 if (list_empty(&cont->children))
3955 mem->use_hierarchy = val;
3956 else
3957 retval = -EBUSY;
3958 } else
3959 retval = -EINVAL;
3960 cgroup_unlock();
3961
3962 return retval;
3963}
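/*
 * Worked example of the rules above: with groups A/ and A/B/,
 * writing 1 to A/memory.use_hierarchy succeeds only while A has no
 * children; once A/B exists, the write returns -EBUSY. And while A's
 * flag is set, flipping B's own flag returns -EINVAL because the
 * parent's use_hierarchy is set.
 */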
3964
0c3e73e8 3965
7a159cc9
JW
3966static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
3967 enum mem_cgroup_stat_index idx)
0c3e73e8 3968{
7d74b06f 3969 struct mem_cgroup *iter;
7a159cc9 3970 long val = 0;
0c3e73e8 3971
7a159cc9 3972 /* Per-cpu values can be negative, use a signed accumulator */
7d74b06f
KH
3973 for_each_mem_cgroup_tree(iter, mem)
3974 val += mem_cgroup_read_stat(iter, idx);
3975
3976 if (val < 0) /* race ? */
3977 val = 0;
3978 return val;
0c3e73e8
BS
3979}
3980
104f3928
KS
3981static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3982{
7d74b06f 3983 u64 val;
104f3928
KS
3984
3985 if (!mem_cgroup_is_root(mem)) {
3986 if (!swap)
3987 return res_counter_read_u64(&mem->res, RES_USAGE);
3988 else
3989 return res_counter_read_u64(&mem->memsw, RES_USAGE);
3990 }
3991
7a159cc9
JW
3992 val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
3993 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
104f3928 3994
7d74b06f 3995 if (swap)
7a159cc9 3996 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
104f3928
KS
3997
3998 return val << PAGE_SHIFT;
3999}
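/*
 * Worked example for the root path above: with CACHE = 300 pages,
 * RSS = 200 pages and SWAPOUT = 50 pages on a 4KiB-page system,
 * mem_cgroup_usage(root, false) = (300 + 200) << 12 = 2048000 bytes,
 * and mem_cgroup_usage(root, true) = (300 + 200 + 50) << 12
 * = 2252800 bytes.
 */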
4000
2c3daa72 4001static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
8cdea7c0 4002{
8c7c6e34 4003 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
104f3928 4004 u64 val;
8c7c6e34
KH
4005 int type, name;
4006
4007 type = MEMFILE_TYPE(cft->private);
4008 name = MEMFILE_ATTR(cft->private);
4009 switch (type) {
4010 case _MEM:
104f3928
KS
4011 if (name == RES_USAGE)
4012 val = mem_cgroup_usage(mem, false);
4013 else
0c3e73e8 4014 val = res_counter_read_u64(&mem->res, name);
8c7c6e34
KH
4015 break;
4016 case _MEMSWAP:
104f3928
KS
4017 if (name == RES_USAGE)
4018 val = mem_cgroup_usage(mem, true);
4019 else
0c3e73e8 4020 val = res_counter_read_u64(&mem->memsw, name);
8c7c6e34
KH
4021 break;
4022 default:
4023 BUG();
4024 break;
4025 }
4026 return val;
8cdea7c0 4027}
628f4235
KH
4028/*
4029 * The user of this function is...
4030 * RES_LIMIT.
4031 */
856c13aa
PM
4032static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
4033 const char *buffer)
8cdea7c0 4034{
628f4235 4035 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
8c7c6e34 4036 int type, name;
628f4235
KH
4037 unsigned long long val;
4038 int ret;
4039
8c7c6e34
KH
4040 type = MEMFILE_TYPE(cft->private);
4041 name = MEMFILE_ATTR(cft->private);
4042 switch (name) {
628f4235 4043 case RES_LIMIT:
4b3bde4c
BS
4044 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4045 ret = -EINVAL;
4046 break;
4047 }
628f4235
KH
4048 /* This function does all necessary parse...reuse it */
4049 ret = res_counter_memparse_write_strategy(buffer, &val);
8c7c6e34
KH
4050 if (ret)
4051 break;
4052 if (type == _MEM)
628f4235 4053 ret = mem_cgroup_resize_limit(memcg, val);
8c7c6e34
KH
4054 else
4055 ret = mem_cgroup_resize_memsw_limit(memcg, val);
628f4235 4056 break;
296c81d8
BS
4057 case RES_SOFT_LIMIT:
4058 ret = res_counter_memparse_write_strategy(buffer, &val);
4059 if (ret)
4060 break;
4061 /*
4062 * For memsw, soft limits are hard to implement in terms
4063 * of semantics; for now, we support soft limits only for
4064 * memory control without swap.
4065 */
4066 if (type == _MEM)
4067 ret = res_counter_set_soft_limit(&memcg->res, val);
4068 else
4069 ret = -EINVAL;
4070 break;
628f4235
KH
4071 default:
4072 ret = -EINVAL; /* should be BUG() ? */
4073 break;
4074 }
4075 return ret;
8cdea7c0
BS
4076}
4077
fee7b548
KH
4078static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
4079 unsigned long long *mem_limit, unsigned long long *memsw_limit)
4080{
4081 struct cgroup *cgroup;
4082 unsigned long long min_limit, min_memsw_limit, tmp;
4083
4084 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4085 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4086 cgroup = memcg->css.cgroup;
4087 if (!memcg->use_hierarchy)
4088 goto out;
4089
4090 while (cgroup->parent) {
4091 cgroup = cgroup->parent;
4092 memcg = mem_cgroup_from_cont(cgroup);
4093 if (!memcg->use_hierarchy)
4094 break;
4095 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
4096 min_limit = min(min_limit, tmp);
4097 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4098 min_memsw_limit = min(min_memsw_limit, tmp);
4099 }
4100out:
4101 *mem_limit = min_limit;
4102 *memsw_limit = min_memsw_limit;
4103 return;
4104}
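/*
 * Worked example for the walk above: A (limit 1G) -> B (limit 512M)
 * -> C (unlimited), all with use_hierarchy set. For C,
 * hierarchical_memory_limit = min(RES_COUNTER_MAX, 512M, 1G) = 512M.
 * If B had use_hierarchy unset, the walk would break at B without
 * taking its limit, and C would report its own (unlimited) value.
 */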
4105
29f2a4da 4106static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
c84872e1
PE
4107{
4108 struct mem_cgroup *mem;
8c7c6e34 4109 int type, name;
c84872e1
PE
4110
4111 mem = mem_cgroup_from_cont(cont);
8c7c6e34
KH
4112 type = MEMFILE_TYPE(event);
4113 name = MEMFILE_ATTR(event);
4114 switch (name) {
29f2a4da 4115 case RES_MAX_USAGE:
8c7c6e34
KH
4116 if (type == _MEM)
4117 res_counter_reset_max(&mem->res);
4118 else
4119 res_counter_reset_max(&mem->memsw);
29f2a4da
PE
4120 break;
4121 case RES_FAILCNT:
8c7c6e34
KH
4122 if (type == _MEM)
4123 res_counter_reset_failcnt(&mem->res);
4124 else
4125 res_counter_reset_failcnt(&mem->memsw);
29f2a4da
PE
4126 break;
4127 }
f64c3f54 4128
85cc59db 4129 return 0;
c84872e1
PE
4130}
4131
7dc74be0
DN
4132static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
4133 struct cftype *cft)
4134{
4135 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
4136}
4137
02491447 4138#ifdef CONFIG_MMU
7dc74be0
DN
4139static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4140 struct cftype *cft, u64 val)
4141{
4142 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4143
4144 if (val >= (1 << NR_MOVE_TYPE))
4145 return -EINVAL;
4146 /*
4147 * We check this value several times both in can_attach() and
4148 * attach(), so we need cgroup lock to prevent this value from being
4149 * inconsistent.
4150 */
4151 cgroup_lock();
4152 mem->move_charge_at_immigrate = val;
4153 cgroup_unlock();
4154
4155 return 0;
4156}
02491447
DN
4157#else
4158static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4159 struct cftype *cft, u64 val)
4160{
4161 return -ENOSYS;
4162}
4163#endif
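/*
 * The value accepted above is a bitmask checked against
 * (1 << NR_MOVE_TYPE). In this version, bit 0 moves anonymous pages
 * (and their swap charges) and bit 1 additionally moves file pages;
 * e.g. writing 3 to memory.move_charge_at_immigrate enables both,
 * while any larger value is rejected with -EINVAL.
 */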
7dc74be0 4164
14067bb3
KH
4165
4166/* For read statistics */
4167enum {
4168 MCS_CACHE,
4169 MCS_RSS,
d8046582 4170 MCS_FILE_MAPPED,
14067bb3
KH
4171 MCS_PGPGIN,
4172 MCS_PGPGOUT,
1dd3a273 4173 MCS_SWAP,
456f998e
YH
4174 MCS_PGFAULT,
4175 MCS_PGMAJFAULT,
14067bb3
KH
4176 MCS_INACTIVE_ANON,
4177 MCS_ACTIVE_ANON,
4178 MCS_INACTIVE_FILE,
4179 MCS_ACTIVE_FILE,
4180 MCS_UNEVICTABLE,
4181 NR_MCS_STAT,
4182};
4183
4184struct mcs_total_stat {
4185 s64 stat[NR_MCS_STAT];
d2ceb9b7
KH
4186};
4187
14067bb3
KH
4188struct {
4189 char *local_name;
4190 char *total_name;
4191} memcg_stat_strings[NR_MCS_STAT] = {
4192 {"cache", "total_cache"},
4193 {"rss", "total_rss"},
d69b042f 4194 {"mapped_file", "total_mapped_file"},
14067bb3
KH
4195 {"pgpgin", "total_pgpgin"},
4196 {"pgpgout", "total_pgpgout"},
1dd3a273 4197 {"swap", "total_swap"},
456f998e
YH
4198 {"pgfault", "total_pgfault"},
4199 {"pgmajfault", "total_pgmajfault"},
14067bb3
KH
4200 {"inactive_anon", "total_inactive_anon"},
4201 {"active_anon", "total_active_anon"},
4202 {"inactive_file", "total_inactive_file"},
4203 {"active_file", "total_active_file"},
4204 {"unevictable", "total_unevictable"}
4205};
4206
4207
7d74b06f
KH
4208static void
4209mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
14067bb3 4210{
14067bb3
KH
4211 s64 val;
4212
4213 /* per cpu stat */
c62b1a3b 4214 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
14067bb3 4215 s->stat[MCS_CACHE] += val * PAGE_SIZE;
c62b1a3b 4216 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
14067bb3 4217 s->stat[MCS_RSS] += val * PAGE_SIZE;
c62b1a3b 4218 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
d8046582 4219 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
e9f8974f 4220 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
14067bb3 4221 s->stat[MCS_PGPGIN] += val;
e9f8974f 4222 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
14067bb3 4223 s->stat[MCS_PGPGOUT] += val;
1dd3a273 4224 if (do_swap_account) {
c62b1a3b 4225 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
1dd3a273
DN
4226 s->stat[MCS_SWAP] += val * PAGE_SIZE;
4227 }
456f998e
YH
4228 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
4229 s->stat[MCS_PGFAULT] += val;
4230 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
4231 s->stat[MCS_PGMAJFAULT] += val;
14067bb3
KH
4232
4233 /* per zone stat */
bb2a0de9 4234 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON));
14067bb3 4235 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
bb2a0de9 4236 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON));
14067bb3 4237 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
bb2a0de9 4238 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE));
14067bb3 4239 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
bb2a0de9 4240 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE));
14067bb3 4241 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
bb2a0de9 4242 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE));
14067bb3 4243 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
14067bb3
KH
4244}
4245
4246static void
4247mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
4248{
7d74b06f
KH
4249 struct mem_cgroup *iter;
4250
4251 for_each_mem_cgroup_tree(iter, mem)
4252 mem_cgroup_get_local_stat(iter, s);
14067bb3
KH
4253}
4254
406eb0c9
YH
4255#ifdef CONFIG_NUMA
4256static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4257{
4258 int nid;
4259 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4260 unsigned long node_nr;
4261 struct cgroup *cont = m->private;
4262 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4263
bb2a0de9 4264 total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
406eb0c9
YH
4265 seq_printf(m, "total=%lu", total_nr);
4266 for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de9 4267 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
406eb0c9
YH
4268 seq_printf(m, " N%d=%lu", nid, node_nr);
4269 }
4270 seq_putc(m, '\n');
4271
bb2a0de9 4272 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
406eb0c9
YH
4273 seq_printf(m, "file=%lu", file_nr);
4274 for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de9
KH
4275 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4276 LRU_ALL_FILE);
406eb0c9
YH
4277 seq_printf(m, " N%d=%lu", nid, node_nr);
4278 }
4279 seq_putc(m, '\n');
4280
bb2a0de9 4281 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
406eb0c9
YH
4282 seq_printf(m, "anon=%lu", anon_nr);
4283 for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de9
KH
4284 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4285 LRU_ALL_ANON);
406eb0c9
YH
4286 seq_printf(m, " N%d=%lu", nid, node_nr);
4287 }
4288 seq_putc(m, '\n');
4289
bb2a0de9 4290 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
406eb0c9
YH
4291 seq_printf(m, "unevictable=%lu", unevictable_nr);
4292 for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de9
KH
4293 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4294 BIT(LRU_UNEVICTABLE));
406eb0c9
YH
4295 seq_printf(m, " N%d=%lu", nid, node_nr);
4296 }
4297 seq_putc(m, '\n');
4298 return 0;
4299}
4300#endif /* CONFIG_NUMA */
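/*
 * Sample memory.numa_stat output from the show function above, with
 * illustrative counts and one N<nid>= pair per node that has memory:
 *
 *   total=1523 N0=1400 N1=123
 *   file=1123 N0=1100 N1=23
 *   anon=400 N0=300 N1=100
 *   unevictable=0 N0=0 N1=0
 */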
4301
c64745cf
PM
4302static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4303 struct cgroup_map_cb *cb)
d2ceb9b7 4304{
d2ceb9b7 4305 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
14067bb3 4306 struct mcs_total_stat mystat;
d2ceb9b7
KH
4307 int i;
4308
14067bb3
KH
4309 memset(&mystat, 0, sizeof(mystat));
4310 mem_cgroup_get_local_stat(mem_cont, &mystat);
d2ceb9b7 4311
406eb0c9 4312
1dd3a273
DN
4313 for (i = 0; i < NR_MCS_STAT; i++) {
4314 if (i == MCS_SWAP && !do_swap_account)
4315 continue;
14067bb3 4316 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
1dd3a273 4317 }
7b854121 4318
14067bb3 4319 /* Hierarchical information */
fee7b548
KH
4320 {
4321 unsigned long long limit, memsw_limit;
4322 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4323 cb->fill(cb, "hierarchical_memory_limit", limit);
4324 if (do_swap_account)
4325 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4326 }
7f016ee8 4327
14067bb3
KH
4328 memset(&mystat, 0, sizeof(mystat));
4329 mem_cgroup_get_total_stat(mem_cont, &mystat);
1dd3a273
DN
4330 for (i = 0; i < NR_MCS_STAT; i++) {
4331 if (i == MCS_SWAP && !do_swap_account)
4332 continue;
14067bb3 4333 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
1dd3a273 4334 }
14067bb3 4335
7f016ee8 4336#ifdef CONFIG_DEBUG_VM
c772be93 4337 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
7f016ee8
KM
4338
4339 {
4340 int nid, zid;
4341 struct mem_cgroup_per_zone *mz;
4342 unsigned long recent_rotated[2] = {0, 0};
4343 unsigned long recent_scanned[2] = {0, 0};
4344
4345 for_each_online_node(nid)
4346 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4347 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4348
4349 recent_rotated[0] +=
4350 mz->reclaim_stat.recent_rotated[0];
4351 recent_rotated[1] +=
4352 mz->reclaim_stat.recent_rotated[1];
4353 recent_scanned[0] +=
4354 mz->reclaim_stat.recent_scanned[0];
4355 recent_scanned[1] +=
4356 mz->reclaim_stat.recent_scanned[1];
4357 }
4358 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4359 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4360 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4361 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4362 }
4363#endif
4364
d2ceb9b7
KH
4365 return 0;
4366}
4367
a7885eb8
KM
4368static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4369{
4370 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4371
1f4c025b 4372 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
4373}
4374
4375static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4376 u64 val)
4377{
4378 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4379 struct mem_cgroup *parent;
068b38c1 4380
a7885eb8
KM
4381 if (val > 100)
4382 return -EINVAL;
4383
4384 if (cgrp->parent == NULL)
4385 return -EINVAL;
4386
4387 parent = mem_cgroup_from_cont(cgrp->parent);
068b38c1
LZ
4388
4389 cgroup_lock();
4390
a7885eb8
KM
4391 /* If under hierarchy, only empty-root can set this value */
4392 if ((parent->use_hierarchy) ||
068b38c1
LZ
4393 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4394 cgroup_unlock();
a7885eb8 4395 return -EINVAL;
068b38c1 4396 }
a7885eb8 4397
a7885eb8 4398 memcg->swappiness = val;
a7885eb8 4399
068b38c1
LZ
4400 cgroup_unlock();
4401
a7885eb8
KM
4402 return 0;
4403}
4404
2e72b634
KS
4405static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4406{
4407 struct mem_cgroup_threshold_ary *t;
4408 u64 usage;
4409 int i;
4410
4411 rcu_read_lock();
4412 if (!swap)
2c488db2 4413 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 4414 else
2c488db2 4415 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
4416
4417 if (!t)
4418 goto unlock;
4419
4420 usage = mem_cgroup_usage(memcg, swap);
4421
4422 /*
4423 * current_threshold points to the threshold just below usage.
4424 * If that's not true, a threshold was crossed after the last
4425 * call of __mem_cgroup_threshold().
4426 */
5407a562 4427 i = t->current_threshold;
2e72b634
KS
4428
4429 /*
4430 * Iterate backward over array of thresholds starting from
4431 * current_threshold and check if a threshold is crossed.
4432 * If none of thresholds below usage is crossed, we read
4433 * only one element of the array here.
4434 */
4435 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4436 eventfd_signal(t->entries[i].eventfd, 1);
4437
4438 /* i = current_threshold + 1 */
4439 i++;
4440
4441 /*
4442 * Iterate forward over array of thresholds starting from
4443 * current_threshold+1 and check if a threshold is crossed.
4444 * If none of thresholds above usage is crossed, we read
4445 * only one element of the array here.
4446 */
4447 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4448 eventfd_signal(t->entries[i].eventfd, 1);
4449
4450 /* Update current_threshold */
5407a562 4451 t->current_threshold = i - 1;
2e72b634
KS
4452unlock:
4453 rcu_read_unlock();
4454}
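/*
 * Worked example of the two scans above: sorted thresholds
 * {4M, 8M, 16M} with usage at 10M give current_threshold = 1 (the 8M
 * entry, the largest below usage). If usage then falls to 3M, the
 * backward scan signals the 8M and 4M eventfds and leaves
 * current_threshold = -1; if usage instead grows to 20M, the forward
 * scan signals the 16M eventfd and leaves current_threshold = 2.
 */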
4455
4456static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4457{
ad4ca5f4
KS
4458 while (memcg) {
4459 __mem_cgroup_threshold(memcg, false);
4460 if (do_swap_account)
4461 __mem_cgroup_threshold(memcg, true);
4462
4463 memcg = parent_mem_cgroup(memcg);
4464 }
2e72b634
KS
4465}
4466
4467static int compare_thresholds(const void *a, const void *b)
4468{
4469 const struct mem_cgroup_threshold *_a = a;
4470 const struct mem_cgroup_threshold *_b = b;
4471
4472 return _a->threshold - _b->threshold;
4473}
4474
7d74b06f 4475static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
9490ff27
KH
4476{
4477 struct mem_cgroup_eventfd_list *ev;
4478
4479 list_for_each_entry(ev, &mem->oom_notify, list)
4480 eventfd_signal(ev->eventfd, 1);
4481 return 0;
4482}
4483
4484static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
4485{
7d74b06f
KH
4486 struct mem_cgroup *iter;
4487
4488 for_each_mem_cgroup_tree(iter, mem)
4489 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
4490}
4491
4492static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4493 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
2e72b634
KS
4494{
4495 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db2
KS
4496 struct mem_cgroup_thresholds *thresholds;
4497 struct mem_cgroup_threshold_ary *new;
2e72b634
KS
4498 int type = MEMFILE_TYPE(cft->private);
4499 u64 threshold, usage;
2c488db2 4500 int i, size, ret;
2e72b634
KS
4501
4502 ret = res_counter_memparse_write_strategy(args, &threshold);
4503 if (ret)
4504 return ret;
4505
4506 mutex_lock(&memcg->thresholds_lock);
2c488db2 4507
2e72b634 4508 if (type == _MEM)
2c488db2 4509 thresholds = &memcg->thresholds;
2e72b634 4510 else if (type == _MEMSWAP)
2c488db2 4511 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
4512 else
4513 BUG();
4514
4515 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4516
4517 /* Check if a threshold crossed before adding a new one */
2c488db2 4518 if (thresholds->primary)
2e72b634
KS
4519 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4520
2c488db2 4521 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
4522
4523 /* Allocate memory for new array of thresholds */
2c488db2 4524 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
2e72b634 4525 GFP_KERNEL);
2c488db2 4526 if (!new) {
2e72b634
KS
4527 ret = -ENOMEM;
4528 goto unlock;
4529 }
2c488db2 4530 new->size = size;
2e72b634
KS
4531
4532 /* Copy thresholds (if any) to new array */
2c488db2
KS
4533 if (thresholds->primary) {
4534 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
2e72b634 4535 sizeof(struct mem_cgroup_threshold));
2c488db2
KS
4536 }
4537
2e72b634 4538 /* Add new threshold */
2c488db2
KS
4539 new->entries[size - 1].eventfd = eventfd;
4540 new->entries[size - 1].threshold = threshold;
2e72b634
KS
4541
4542 /* Sort thresholds. Registering of new threshold isn't time-critical */
2c488db2 4543 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
2e72b634
KS
4544 compare_thresholds, NULL);
4545
4546 /* Find current threshold */
2c488db2 4547 new->current_threshold = -1;
2e72b634 4548 for (i = 0; i < size; i++) {
2c488db2 4549 if (new->entries[i].threshold < usage) {
2e72b634 4550 /*
2c488db2
KS
4551 * new->current_threshold will not be used until
4552 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
4553 * it here.
4554 */
2c488db2 4555 ++new->current_threshold;
2e72b634
KS
4556 }
4557 }
4558
2c488db2
KS
4559 /* Free old spare buffer and save old primary buffer as spare */
4560 kfree(thresholds->spare);
4561 thresholds->spare = thresholds->primary;
4562
4563 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4564
907860ed 4565 /* To be sure that nobody uses thresholds */
2e72b634
KS
4566 synchronize_rcu();
4567
2e72b634
KS
4568unlock:
4569 mutex_unlock(&memcg->thresholds_lock);
4570
4571 return ret;
4572}
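/*
 * Userspace reaches the handler above by writing
 * "<event_fd> <fd of memory.usage_in_bytes> <threshold>" to
 * cgroup.event_control. A minimal sketch (the v1 mount point and the
 * group name "grp" are assumptions):
 *
 *   #include <sys/eventfd.h>
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           char buf[64];
 *           uint64_t hits;
 *           int efd = eventfd(0, 0);
 *           int ufd = open("/sys/fs/cgroup/memory/grp/"
 *                          "memory.usage_in_bytes", O_RDONLY);
 *           int cfd = open("/sys/fs/cgroup/memory/grp/"
 *                          "cgroup.event_control", O_WRONLY);
 *
 *           if (efd < 0 || ufd < 0 || cfd < 0)
 *                   return 1;
 *           snprintf(buf, sizeof(buf), "%d %d %llu",
 *                    efd, ufd, 64ULL << 20);
 *           if (write(cfd, buf, strlen(buf)) < 0)
 *                   return 1;
 *           read(efd, &hits, sizeof(hits));
 *           return 0;
 *   }
 *
 * The read() blocks until usage crosses the 64M threshold in either
 * direction, matching the two scans in __mem_cgroup_threshold().
 */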
4573
907860ed 4574static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
9490ff27 4575 struct cftype *cft, struct eventfd_ctx *eventfd)
2e72b634
KS
4576{
4577 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db2
KS
4578 struct mem_cgroup_thresholds *thresholds;
4579 struct mem_cgroup_threshold_ary *new;
2e72b634
KS
4580 int type = MEMFILE_TYPE(cft->private);
4581 u64 usage;
2c488db2 4582 int i, j, size;
2e72b634
KS
4583
4584 mutex_lock(&memcg->thresholds_lock);
4585 if (type == _MEM)
2c488db2 4586 thresholds = &memcg->thresholds;
2e72b634 4587 else if (type == _MEMSWAP)
2c488db2 4588 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
4589 else
4590 BUG();
4591
4592 /*
4593 * Something went wrong if we're trying to unregister a threshold
4594 * when we don't have any thresholds.
4595 */
4596 BUG_ON(!thresholds);
4597
4598 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4599
4600 /* Check if a threshold crossed before removing */
4601 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4602
4603 /* Calculate new number of threshold */
2c488db2
KS
4604 size = 0;
4605 for (i = 0; i < thresholds->primary->size; i++) {
4606 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634
KS
4607 size++;
4608 }
4609
2c488db2 4610 new = thresholds->spare;
907860ed 4611
2e72b634
KS
4612 /* Set thresholds array to NULL if we don't have thresholds */
4613 if (!size) {
2c488db2
KS
4614 kfree(new);
4615 new = NULL;
907860ed 4616 goto swap_buffers;
2e72b634
KS
4617 }
4618
2c488db2 4619 new->size = size;
2e72b634
KS
4620
4621 /* Copy thresholds and find current threshold */
2c488db2
KS
4622 new->current_threshold = -1;
4623 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4624 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
4625 continue;
4626
2c488db2
KS
4627 new->entries[j] = thresholds->primary->entries[i];
4628 if (new->entries[j].threshold < usage) {
2e72b634 4629 /*
2c488db2 4630 * new->current_threshold will not be used
2e72b634
KS
4631 * until rcu_assign_pointer(), so it's safe to increment
4632 * it here.
4633 */
2c488db2 4634 ++new->current_threshold;
2e72b634
KS
4635 }
4636 j++;
4637 }
4638
907860ed 4639swap_buffers:
2c488db2
KS
4640 /* Swap primary and spare array */
4641 thresholds->spare = thresholds->primary;
4642 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4643
907860ed 4644 /* To be sure that nobody uses thresholds */
2e72b634
KS
4645 synchronize_rcu();
4646
2e72b634 4647 mutex_unlock(&memcg->thresholds_lock);
2e72b634 4648}
c1e862c1 4649
9490ff27
KH
4650static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4651 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4652{
4653 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4654 struct mem_cgroup_eventfd_list *event;
4655 int type = MEMFILE_TYPE(cft->private);
4656
4657 BUG_ON(type != _OOM_TYPE);
4658 event = kmalloc(sizeof(*event), GFP_KERNEL);
4659 if (!event)
4660 return -ENOMEM;
4661
1af8efe9 4662 spin_lock(&memcg_oom_lock);
9490ff27
KH
4663
4664 event->eventfd = eventfd;
4665 list_add(&event->list, &memcg->oom_notify);
4666
4667 /* already in OOM ? */
79dfdacc 4668 if (atomic_read(&memcg->under_oom))
9490ff27 4669 eventfd_signal(eventfd, 1);
1af8efe9 4670 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4671
4672 return 0;
4673}
4674
907860ed 4675static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
9490ff27
KH
4676 struct cftype *cft, struct eventfd_ctx *eventfd)
4677{
4678 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4679 struct mem_cgroup_eventfd_list *ev, *tmp;
4680 int type = MEMFILE_TYPE(cft->private);
4681
4682 BUG_ON(type != _OOM_TYPE);
4683
1af8efe9 4684 spin_lock(&memcg_oom_lock);
9490ff27
KH
4685
4686 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
4687 if (ev->eventfd == eventfd) {
4688 list_del(&ev->list);
4689 kfree(ev);
4690 }
4691 }
4692
1af8efe9 4693 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4694}
4695
3c11ecf4
KH
4696static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4697 struct cftype *cft, struct cgroup_map_cb *cb)
4698{
4699 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4700
4701 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
4702
79dfdacc 4703 if (atomic_read(&mem->under_oom))
3c11ecf4
KH
4704 cb->fill(cb, "under_oom", 1);
4705 else
4706 cb->fill(cb, "under_oom", 0);
4707 return 0;
4708}
4709
3c11ecf4
KH
4710static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4711 struct cftype *cft, u64 val)
4712{
4713 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4714 struct mem_cgroup *parent;
4715
4716 /* cannot set to root cgroup and only 0 and 1 are allowed */
4717 if (!cgrp->parent || !((val == 0) || (val == 1)))
4718 return -EINVAL;
4719
4720 parent = mem_cgroup_from_cont(cgrp->parent);
4721
4722 cgroup_lock();
4723 /* oom-kill-disable is a flag for subhierarchy. */
4724 if ((parent->use_hierarchy) ||
4725 (mem->use_hierarchy && !list_empty(&cgrp->children))) {
4726 cgroup_unlock();
4727 return -EINVAL;
4728 }
4729 mem->oom_kill_disable = val;
4d845ebf
KH
4730 if (!val)
4731 memcg_oom_recover(mem);
3c11ecf4
KH
4732 cgroup_unlock();
4733 return 0;
4734}
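/*
 * memory.oom_control ties the two handlers above together: reading
 * reports oom_kill_disable and under_oom; writing 1 disables the OOM
 * killer for the (sub)hierarchy, so allocating tasks wait instead of
 * being killed. An OOM notification is registered like a usage
 * threshold, only without a threshold argument, e.g. writing
 * "<event_fd> <fd of memory.oom_control>" to cgroup.event_control.
 */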
4735
406eb0c9
YH
4736#ifdef CONFIG_NUMA
4737static const struct file_operations mem_control_numa_stat_file_operations = {
4738 .read = seq_read,
4739 .llseek = seq_lseek,
4740 .release = single_release,
4741};
4742
4743static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4744{
4745 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4746
4747 file->f_op = &mem_control_numa_stat_file_operations;
4748 return single_open(file, mem_control_numa_stat_show, cont);
4749}
4750#endif /* CONFIG_NUMA */
4751
82f9d486
KH
4752static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp,
4753 struct cftype *cft,
4754 struct cgroup_map_cb *cb)
4755{
4756 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4757 char string[64];
4758 int i;
4759
4760 for (i = 0; i < NR_SCANSTATS; i++) {
4761 strcpy(string, scanstat_string[i]);
4762 strcat(string, SCANSTAT_WORD_LIMIT);
4763 cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]);
4764 }
4765
4766 for (i = 0; i < NR_SCANSTATS; i++) {
4767 strcpy(string, scanstat_string[i]);
4768 strcat(string, SCANSTAT_WORD_SYSTEM);
4769 cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]);
4770 }
4771
4772 for (i = 0; i < NR_SCANSTATS; i++) {
4773 strcpy(string, scanstat_string[i]);
4774 strcat(string, SCANSTAT_WORD_LIMIT);
4775 strcat(string, SCANSTAT_WORD_HIERARCHY);
4776 cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]);
4777 }
4778 for (i = 0; i < NR_SCANSTATS; i++) {
4779 strcpy(string, scanstat_string[i]);
4780 strcat(string, SCANSTAT_WORD_SYSTEM);
4781 strcat(string, SCANSTAT_WORD_HIERARCHY);
4782 cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]);
4783 }
4784 return 0;
4785}
4786
4787static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp,
4788 unsigned int event)
4789{
4790 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4791
4792 spin_lock(&mem->scanstat.lock);
4793 memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats));
4794 memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats));
4795 spin_unlock(&mem->scanstat.lock);
4796 return 0;
4797}
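/*
 * The new memory.vmscan_stat file (wired up in mem_cgroup_files
 * below) prints every scanstat_string[] entry four ways: with the
 * by-limit and by-system suffixes from scanstat.stats[], and each of
 * those again in a hierarchy flavour from scanstat.rootstats[].
 * Writing any value to the same file hits
 * mem_cgroup_reset_vmscan_stat() and zeroes both tables under
 * scanstat.lock.
 */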
4798
4799
8cdea7c0
BS
4800static struct cftype mem_cgroup_files[] = {
4801 {
0eea1030 4802 .name = "usage_in_bytes",
8c7c6e34 4803 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2c3daa72 4804 .read_u64 = mem_cgroup_read,
9490ff27
KH
4805 .register_event = mem_cgroup_usage_register_event,
4806 .unregister_event = mem_cgroup_usage_unregister_event,
8cdea7c0 4807 },
c84872e1
PE
4808 {
4809 .name = "max_usage_in_bytes",
8c7c6e34 4810 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
29f2a4da 4811 .trigger = mem_cgroup_reset,
c84872e1
PE
4812 .read_u64 = mem_cgroup_read,
4813 },
8cdea7c0 4814 {
0eea1030 4815 .name = "limit_in_bytes",
8c7c6e34 4816 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
856c13aa 4817 .write_string = mem_cgroup_write,
2c3daa72 4818 .read_u64 = mem_cgroup_read,
8cdea7c0 4819 },
296c81d8
BS
4820 {
4821 .name = "soft_limit_in_bytes",
4822 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4823 .write_string = mem_cgroup_write,
4824 .read_u64 = mem_cgroup_read,
4825 },
8cdea7c0
BS
4826 {
4827 .name = "failcnt",
8c7c6e34 4828 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
29f2a4da 4829 .trigger = mem_cgroup_reset,
2c3daa72 4830 .read_u64 = mem_cgroup_read,
8cdea7c0 4831 },
d2ceb9b7
KH
4832 {
4833 .name = "stat",
c64745cf 4834 .read_map = mem_control_stat_show,
d2ceb9b7 4835 },
c1e862c1
KH
4836 {
4837 .name = "force_empty",
4838 .trigger = mem_cgroup_force_empty_write,
4839 },
18f59ea7
BS
4840 {
4841 .name = "use_hierarchy",
4842 .write_u64 = mem_cgroup_hierarchy_write,
4843 .read_u64 = mem_cgroup_hierarchy_read,
4844 },
a7885eb8
KM
4845 {
4846 .name = "swappiness",
4847 .read_u64 = mem_cgroup_swappiness_read,
4848 .write_u64 = mem_cgroup_swappiness_write,
4849 },
7dc74be0
DN
4850 {
4851 .name = "move_charge_at_immigrate",
4852 .read_u64 = mem_cgroup_move_charge_read,
4853 .write_u64 = mem_cgroup_move_charge_write,
4854 },
9490ff27
KH
4855 {
4856 .name = "oom_control",
3c11ecf4
KH
4857 .read_map = mem_cgroup_oom_control_read,
4858 .write_u64 = mem_cgroup_oom_control_write,
9490ff27
KH
4859 .register_event = mem_cgroup_oom_register_event,
4860 .unregister_event = mem_cgroup_oom_unregister_event,
4861 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4862 },
406eb0c9
YH
4863#ifdef CONFIG_NUMA
4864 {
4865 .name = "numa_stat",
4866 .open = mem_control_numa_stat_open,
89577127 4867 .mode = S_IRUGO,
406eb0c9
YH
4868 },
4869#endif
82f9d486
KH
4870 {
4871 .name = "vmscan_stat",
4872 .read_map = mem_cgroup_vmscan_stat_read,
4873 .trigger = mem_cgroup_reset_vmscan_stat,
4874 },
8cdea7c0
BS
4875};
4876
8c7c6e34
KH
4877#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4878static struct cftype memsw_cgroup_files[] = {
4879 {
4880 .name = "memsw.usage_in_bytes",
4881 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4882 .read_u64 = mem_cgroup_read,
9490ff27
KH
4883 .register_event = mem_cgroup_usage_register_event,
4884 .unregister_event = mem_cgroup_usage_unregister_event,
8c7c6e34
KH
4885 },
4886 {
4887 .name = "memsw.max_usage_in_bytes",
4888 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4889 .trigger = mem_cgroup_reset,
4890 .read_u64 = mem_cgroup_read,
4891 },
4892 {
4893 .name = "memsw.limit_in_bytes",
4894 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4895 .write_string = mem_cgroup_write,
4896 .read_u64 = mem_cgroup_read,
4897 },
4898 {
4899 .name = "memsw.failcnt",
4900 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4901 .trigger = mem_cgroup_reset,
4902 .read_u64 = mem_cgroup_read,
4903 },
4904};
4905
4906static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4907{
4908 if (!do_swap_account)
4909 return 0;
4910 return cgroup_add_files(cont, ss, memsw_cgroup_files,
4911 ARRAY_SIZE(memsw_cgroup_files));
4912};
4913#else
4914static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4915{
4916 return 0;
4917}
4918#endif
4919
6d12e2d8
KH
4920static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4921{
4922 struct mem_cgroup_per_node *pn;
1ecaab2b 4923 struct mem_cgroup_per_zone *mz;
b69408e8 4924 enum lru_list l;
41e3355d 4925 int zone, tmp = node;
1ecaab2b
KH
4926 /*
4927 * This routine is called for each possible node.
4928 * But it's a BUG to call kmalloc() against an offline node.
4929 *
4930 * TODO: this routine can waste a lot of memory for nodes which will
4931 * never be onlined. It's better to use a memory hotplug callback
4932 * function.
4933 */
41e3355d
KH
4934 if (!node_state(node, N_NORMAL_MEMORY))
4935 tmp = -1;
17295c88 4936 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
4937 if (!pn)
4938 return 1;
1ecaab2b 4939
6d12e2d8 4940 mem->info.nodeinfo[node] = pn;
1ecaab2b
KH
4941 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4942 mz = &pn->zoneinfo[zone];
b69408e8
CL
4943 for_each_lru(l)
4944 INIT_LIST_HEAD(&mz->lists[l]);
f64c3f54 4945 mz->usage_in_excess = 0;
4e416953
BS
4946 mz->on_tree = false;
4947 mz->mem = mem;
1ecaab2b 4948 }
6d12e2d8
KH
4949 return 0;
4950}
4951
1ecaab2b
KH
4952static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4953{
4954 kfree(mem->info.nodeinfo[node]);
4955}
4956
33327948
KH
4957static struct mem_cgroup *mem_cgroup_alloc(void)
4958{
4959 struct mem_cgroup *mem;
c62b1a3b 4960 int size = sizeof(struct mem_cgroup);
33327948 4961
c62b1a3b 4962 /* Can be very big if MAX_NUMNODES is very big */
c8dad2bb 4963 if (size < PAGE_SIZE)
17295c88 4964 mem = kzalloc(size, GFP_KERNEL);
33327948 4965 else
17295c88 4966 mem = vzalloc(size);
33327948 4967
e7bbcdf3
DC
4968 if (!mem)
4969 return NULL;
4970
c62b1a3b 4971 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
d2e61b8d
DC
4972 if (!mem->stat)
4973 goto out_free;
711d3d2c 4974 spin_lock_init(&mem->pcp_counter_lock);
33327948 4975 return mem;
d2e61b8d
DC
4976
4977out_free:
4978 if (size < PAGE_SIZE)
4979 kfree(mem);
4980 else
4981 vfree(mem);
4982 return NULL;
33327948
KH
4983}
4984
8c7c6e34
KH
4985/*
4986 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4987 * (Scanning everything at force_empty time is too costly...)
4988 *
4989 * Instead of clearing all references at force_empty, we remember
4990 * the number of references from swap_cgroup and free the mem_cgroup when
4991 * it goes down to 0.
4992 *
8c7c6e34
KH
4993 * Removal of cgroup itself succeeds regardless of refs from swap.
4994 */
4995
a7ba0eef 4996static void __mem_cgroup_free(struct mem_cgroup *mem)
33327948 4997{
08e552c6
KH
4998 int node;
4999
f64c3f54 5000 mem_cgroup_remove_from_trees(mem);
04046e1a
KH
5001 free_css_id(&mem_cgroup_subsys, &mem->css);
5002
08e552c6
KH
5003 for_each_node_state(node, N_POSSIBLE)
5004 free_mem_cgroup_per_zone_info(mem, node);
5005
c62b1a3b
KH
5006 free_percpu(mem->stat);
5007 if (sizeof(struct mem_cgroup) < PAGE_SIZE)
33327948
KH
5008 kfree(mem);
5009 else
5010 vfree(mem);
5011}
5012
8c7c6e34
KH
5013static void mem_cgroup_get(struct mem_cgroup *mem)
5014{
5015 atomic_inc(&mem->refcnt);
5016}
5017
483c30b5 5018static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
8c7c6e34 5019{
483c30b5 5020 if (atomic_sub_and_test(count, &mem->refcnt)) {
7bcc1bb1 5021 struct mem_cgroup *parent = parent_mem_cgroup(mem);
a7ba0eef 5022 __mem_cgroup_free(mem);
7bcc1bb1
DN
5023 if (parent)
5024 mem_cgroup_put(parent);
5025 }
8c7c6e34
KH
5026}
5027
483c30b5
DN
5028static void mem_cgroup_put(struct mem_cgroup *mem)
5029{
5030 __mem_cgroup_put(mem, 1);
5031}
5032
7bcc1bb1
DN
5033/*
5034 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
5035 */
5036static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
5037{
5038 if (!mem->res.parent)
5039 return NULL;
5040 return mem_cgroup_from_res_counter(mem->res.parent, res);
5041}
33327948 5042
c077719b
KH
5043#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5044static void __init enable_swap_cgroup(void)
5045{
f8d66542 5046 if (!mem_cgroup_disabled() && really_do_swap_account)
c077719b
KH
5047 do_swap_account = 1;
5048}
5049#else
5050static void __init enable_swap_cgroup(void)
5051{
5052}
5053#endif
5054
f64c3f54
BS
5055static int mem_cgroup_soft_limit_tree_init(void)
5056{
5057 struct mem_cgroup_tree_per_node *rtpn;
5058 struct mem_cgroup_tree_per_zone *rtpz;
5059 int tmp, node, zone;
5060
5061 for_each_node_state(node, N_POSSIBLE) {
5062 tmp = node;
5063 if (!node_state(node, N_NORMAL_MEMORY))
5064 tmp = -1;
5065 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
5066 if (!rtpn)
5067 return 1;
5068
5069 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5070
5071 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5072 rtpz = &rtpn->rb_tree_per_zone[zone];
5073 rtpz->rb_root = RB_ROOT;
5074 spin_lock_init(&rtpz->lock);
5075 }
5076 }
5077 return 0;
5078}
5079
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem, *parent;
	long error = -ENOMEM;
	int node;

	mem = mem_cgroup_alloc();
	if (!mem)
		return ERR_PTR(error);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	/* root ? */
	if (cont->parent == NULL) {
		int cpu;
		enable_swap_cgroup();
		parent = NULL;
		root_mem_cgroup = mem;
		if (mem_cgroup_soft_limit_tree_init())
			goto free_out;
		for_each_possible_cpu(cpu) {
			struct memcg_stock_pcp *stock =
						&per_cpu(memcg_stock, cpu);
			INIT_WORK(&stock->work, drain_local_stock);
		}
		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		mem->use_hierarchy = parent->use_hierarchy;
		mem->oom_kill_disable = parent->oom_kill_disable;
	}

	if (parent && parent->use_hierarchy) {
		res_counter_init(&mem->res, &parent->res);
		res_counter_init(&mem->memsw, &parent->memsw);
		/*
		 * We take a reference on the parent so that it can be
		 * safely accessed during res_counter_charge/uncharge.
		 * The reference is dropped when this mem_cgroup is freed
		 * (see mem_cgroup_put()).
		 */
		mem_cgroup_get(parent);
	} else {
		res_counter_init(&mem->res, NULL);
		res_counter_init(&mem->memsw, NULL);
	}
	mem->last_scanned_child = 0;
	mem->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&mem->oom_notify);

	if (parent)
		mem->swappiness = mem_cgroup_swappiness(parent);
	atomic_set(&mem->refcnt, 1);
	mem->move_charge_at_immigrate = 0;
	mutex_init(&mem->thresholds_lock);
	spin_lock_init(&mem->scanstat.lock);
	return &mem->css;
free_out:
	__mem_cgroup_free(mem);
	root_mem_cgroup = NULL;
	return ERR_PTR(error);
}

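/*
 * Editor's note: the res_counter_init(&mem->res, &parent->res) calls above
 * are what make use_hierarchy work: res_counter_charge() walks the parent
 * chain, so a charge against a child is accounted in every ancestor. The
 * sketch below is a simplified reconstruction of that walk and is an
 * assumption about kernel/res_counter.c of this era, not code from this
 * file (the real version also disables interrupts and reports which
 * counter hit its limit); res_counter_charge_locked() and
 * res_counter_uncharge_locked() are the underlying primitives.
 */
static int __maybe_unused res_counter_charge_sketch(struct res_counter *counter,
						    unsigned long val)
{
	struct res_counter *c, *u;

	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		if (res_counter_charge_locked(c, val)) {
			spin_unlock(&c->lock);
			goto undo;
		}
		spin_unlock(&c->lock);
	}
	return 0;
undo:
	/* unwind the counters we already charged, from the child up to c */
	for (u = counter; u != c; u = u->parent) {
		spin_lock(&u->lock);
		res_counter_uncharge_locked(u, val);
		spin_unlock(&u->lock);
	}
	return -ENOMEM;
}
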
static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	return mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	mem_cgroup_put(mem);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);
	return ret;
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *mem = mc.to;

	if (mem_cgroup_is_root(mem)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge the whole amount at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "mem" cannot be under rmdir() because we've already checked
		 * via cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex, so css_get() can be
		 * postponed.
		 */
		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&mem->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to charging one page at a time */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
		if (ret || !mem)
			/* mem_cgroup_clear_mc() will uncharge later */
			return -ENOMEM;
		mc.precharge++;
	}
	return ret;
}

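/*
 * Editor's note (not in the original file): mem_cgroup_do_precharge()
 * charges in bytes, so precharging N movable ptes reserves N * PAGE_SIZE
 * from mem->res and, with swap accounting enabled, from mem->memsw as
 * well; e.g. 1000 ptes on a 4 KiB-page system reserve 4,096,000 bytes
 * (~3.9 MiB) in mc.to before any page is actually moved. The bulk path
 * keeps the two counters consistent: if the memsw charge fails after res
 * succeeded, res is rolled back before falling through to the one-by-one
 * path.
 */
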
/**
 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: where the target page or swap entry is stored (can be NULL)
 *
 * Returns
 *   0 (MC_TARGET_NONE): the pte is not a target for move charge.
 *   1 (MC_TARGET_PAGE): the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in
 *     target->page with an extra refcount taken (callers must handle it).
 *   2 (MC_TARGET_SWAP): the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */

union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE,	/* not used */
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon() || page_mapcount(page) > 2)
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	int usage_count;
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	usage_count = mem_cgroup_count_swap_user(ent, &page);
	if (usage_count > 1) {	/* we don't move shared anon */
		if (page)
			put_page(page);
		return NULL;
	}
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct inode *inode;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	inode = vma->vm_file->f_path.dentry->d_inode;
	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* the page is moved even if it's not in this task's RSS (not faulted in yet) */
	if (!mapping_cap_swap_backed(mapping)) { /* normal file */
		page = find_get_page(mapping, pgoff);
	} else { /* shmem/tmpfs file: take its swap into account too */
		swp_entry_t ent;
		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
		if (do_swap_account)
			entry->val = ent.val;
	}

	return page;
}

static int is_target_pte_for_mc(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	int ret = 0;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return 0;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Only do a loose check here, without the page_cgroup lock;
		 * mem_cgroup_move_account() re-checks the pc under the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry, and the page doesn't exist or isn't charged */
	if (ent.val && !ret &&
			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

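/*
 * Editor's note: a minimal caller sketch, not part of the original file,
 * restating the contract documented above is_target_pte_for_mc(); the
 * real consumer is mem_cgroup_move_charge_pte_range() below. The helper
 * name is hypothetical, and the put_page() balances the extra reference
 * that MC_TARGET_PAGE hands back.
 */
static void __maybe_unused mc_target_usage_sketch(struct vm_area_struct *vma,
					unsigned long addr, pte_t ptent)
{
	union mc_target target;

	switch (is_target_pte_for_mc(vma, addr, ptent, &target)) {
	case MC_TARGET_PAGE:
		/* ... move the page's charge from mc.from to mc.to ... */
		put_page(target.page);	/* drop the reference we were handed */
		break;
	case MC_TARGET_SWAP:
		/* ... move the swap charge recorded in target.ent ... */
		break;
	default:
		break;
	}
}
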
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

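/*
 * Editor's note (not in the original file): the walk above is pass one of
 * a two-pass scheme. Pass one counts every movable pte under mmap_sem and
 * charges the whole amount up front via mem_cgroup_do_precharge(); only
 * then does mem_cgroup_move_charge() re-walk the page tables to actually
 * flip ownership. Charging before moving means a failure can be unwound
 * from mc.precharge instead of leaving the mm half-moved between cgroups.
 */
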
static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we
			 * should uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
	int ret = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);

	if (mem->move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == mem);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move the owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = mem;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

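/*
 * Editor's note (not in the original file): mem_cgroup_can_attach() relies
 * on the cgroup core's attach sequence, which is roughly:
 *
 *	ss->can_attach()     - sets up mc.* and precharges (above)
 *	ss->cancel_attach()  - on veto by any subsystem: mem_cgroup_clear_mc()
 *	ss->attach()         - after the task moved: mem_cgroup_move_task()
 *
 * so mc.* is torn down on exactly one of the cancel/attach paths. Treat
 * the exact core ordering as a paraphrase, not a quotation.
 */
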
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
	mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		union mc_target target;
		int type;
		struct page *page;
		struct page_cgroup *pc;
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		type = is_target_pte_for_mc(vma, addr, ptent, &target);
		switch (type) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to, false)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* is_target_pte_for_mc() took a reference on the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent,
						mc.from, mc.to, false)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all the precharges we got in can_attach().
		 * Try to charge one page at a time from here on, but make no
		 * further charges to mc.to once a charge has failed during
		 * this attach() phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem may be waiting on our waitq,
		 * so cancel all extra charges, wake up all waiters, and
		 * retry. Because we cancel precharges, we might not be able
		 * to move all the charges, but moving charge is a
		 * best-effort feature anyway, so that is not a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * A nonzero return means we consumed all precharges
			 * and failed to charge any further; just abandon
			 * the walk here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

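/*
 * Editor's note (not in the original file): the trylock dance above exists
 * because a task already holding this mm's mmap_sem may itself be blocked
 * on mc.waitq until the move completes; taking mmap_sem unconditionally
 * here could therefore deadlock. Cancelling the precharges wakes those
 * waiters, after which the trylock is retried.
 */
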
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		put_swap_token(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
}
#endif

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static int __init enable_swap_account(char *s)
{
	/*
	 * only an explicit "1" enables and an explicit "0" disables;
	 * anything else leaves the config default untouched.
	 */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

#endif
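
/*
 * Editor's note: boot-time usage, not part of the original file. With
 * CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED the kernel defaults to swap
 * accounting on, and "swapaccount=0" on the kernel command line turns it
 * off; with that option unset, "swapaccount=1" turns it on.
 */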