mm/memcontrol.c
c942fddf 1// SPDX-License-Identifier: GPL-2.0-or-later
2/* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 */
24
3e32cb2e 25#include <linux/page_counter.h>
26#include <linux/memcontrol.h>
27#include <linux/cgroup.h>
a520110e 28#include <linux/pagewalk.h>
6e84f315 29#include <linux/sched/mm.h>
3a4f8a0b 30#include <linux/shmem_fs.h>
4ffef5fe 31#include <linux/hugetlb.h>
d13d1443 32#include <linux/pagemap.h>
1ff9e6e1 33#include <linux/vm_event_item.h>
d52aa412 34#include <linux/smp.h>
8a9f3ccd 35#include <linux/page-flags.h>
66e1707b 36#include <linux/backing-dev.h>
37#include <linux/bit_spinlock.h>
38#include <linux/rcupdate.h>
e222432b 39#include <linux/limits.h>
b9e15baf 40#include <linux/export.h>
8c7c6e34 41#include <linux/mutex.h>
bb4cc1a8 42#include <linux/rbtree.h>
b6ac57d5 43#include <linux/slab.h>
66e1707b 44#include <linux/swap.h>
02491447 45#include <linux/swapops.h>
66e1707b 46#include <linux/spinlock.h>
2e72b634 47#include <linux/eventfd.h>
79bd9814 48#include <linux/poll.h>
2e72b634 49#include <linux/sort.h>
66e1707b 50#include <linux/fs.h>
d2ceb9b7 51#include <linux/seq_file.h>
70ddf637 52#include <linux/vmpressure.h>
b69408e8 53#include <linux/mm_inline.h>
5d1ea48b 54#include <linux/swap_cgroup.h>
cdec2e42 55#include <linux/cpu.h>
158e0a2d 56#include <linux/oom.h>
0056f4e6 57#include <linux/lockdep.h>
79bd9814 58#include <linux/file.h>
b23afb93 59#include <linux/tracehook.h>
0e4b01df 60#include <linux/psi.h>
c8713d0b 61#include <linux/seq_buf.h>
08e552c6 62#include "internal.h"
d1a4c0b3 63#include <net/sock.h>
4bd2c1ee 64#include <net/ip.h>
f35c3a8e 65#include "slab.h"
8cdea7c0 66
7c0f6ba6 67#include <linux/uaccess.h>
8697d331 68
69#include <trace/events/vmscan.h>
70
71struct cgroup_subsys memory_cgrp_subsys __read_mostly;
72EXPORT_SYMBOL(memory_cgrp_subsys);
68ae564b 73
74struct mem_cgroup *root_mem_cgroup __read_mostly;
75
76/* Active memory cgroup to use from an interrupt context */
77DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
78
79/* Socket memory accounting disabled? */
80static bool cgroup_memory_nosocket;
81
82/* Kernel memory accounting disabled? */
83static bool cgroup_memory_nokmem;
84
21afa38e 85/* Whether the swap controller is active */
c255a458 86#ifdef CONFIG_MEMCG_SWAP
eccb52e7 87bool cgroup_memory_noswap __read_mostly;
c077719b 88#else
eccb52e7 89#define cgroup_memory_noswap 1
2d1c4980 90#endif
c077719b 91
92#ifdef CONFIG_CGROUP_WRITEBACK
93static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
94#endif
95
96/* Whether legacy memory+swap accounting is active */
97static bool do_memsw_account(void)
98{
eccb52e7 99 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
100}
101
102#define THRESHOLDS_EVENTS_TARGET 128
103#define SOFTLIMIT_EVENTS_TARGET 1024
e9f8974f 104
105/*
106 * Cgroups above their limits are maintained in a RB-Tree, independent of
107 * their hierarchy representation
108 */
109
ef8f2327 110struct mem_cgroup_tree_per_node {
bb4cc1a8 111 struct rb_root rb_root;
fa90b2fd 112 struct rb_node *rb_rightmost;
113 spinlock_t lock;
114};
115
116struct mem_cgroup_tree {
117 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
118};
119
120static struct mem_cgroup_tree soft_limit_tree __read_mostly;
121
122/* for OOM */
123struct mem_cgroup_eventfd_list {
124 struct list_head list;
125 struct eventfd_ctx *eventfd;
126};
2e72b634 127
128/*
129 * cgroup_event represents events which userspace want to receive.
130 */
3bc942f3 131struct mem_cgroup_event {
79bd9814 132 /*
59b6f873 133 * memcg which the event belongs to.
79bd9814 134 */
59b6f873 135 struct mem_cgroup *memcg;
136 /*
137 * eventfd to signal userspace about the event.
138 */
139 struct eventfd_ctx *eventfd;
140 /*
141 * Each of these stored in a list by the cgroup.
142 */
143 struct list_head list;
144 /*
145 * register_event() callback will be used to add new userspace
146 * waiter for changes related to this event. Use eventfd_signal()
147 * on eventfd to send notification to userspace.
148 */
59b6f873 149 int (*register_event)(struct mem_cgroup *memcg,
347c4a87 150 struct eventfd_ctx *eventfd, const char *args);
151 /*
152 * unregister_event() callback will be called when userspace closes
153 * the eventfd or on cgroup removing. This callback must be set,
154 * if you want provide notification functionality.
155 */
59b6f873 156 void (*unregister_event)(struct mem_cgroup *memcg,
fba94807 157 struct eventfd_ctx *eventfd);
158 /*
159 * All fields below needed to unregister event when
160 * userspace closes eventfd.
161 */
162 poll_table pt;
163 wait_queue_head_t *wqh;
ac6424b9 164 wait_queue_entry_t wait;
79bd9814
TH
165 struct work_struct remove;
166};
167
168static void mem_cgroup_threshold(struct mem_cgroup *memcg);
169static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
2e72b634 170
171/* Stuffs for move charges at task migration. */
172/*
1dfab5ab 173 * Types of charges to be moved.
7dc74be0 174 */
175#define MOVE_ANON 0x1U
176#define MOVE_FILE 0x2U
177#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
7dc74be0 178
179/* "mc" and its members are protected by cgroup_mutex */
180static struct move_charge_struct {
b1dd693e 181 spinlock_t lock; /* for from, to */
264a0ae1 182 struct mm_struct *mm;
183 struct mem_cgroup *from;
184 struct mem_cgroup *to;
1dfab5ab 185 unsigned long flags;
4ffef5fe 186 unsigned long precharge;
854ffa8d 187 unsigned long moved_charge;
483c30b5 188 unsigned long moved_swap;
189 struct task_struct *moving_task; /* a task moving charges */
190 wait_queue_head_t waitq; /* a waitq for other context */
191} mc = {
2bd9bb20 192 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
193 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
194};
4ffef5fe 195
196/*
197 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
198 * limit reclaim to prevent infinite loops, if they ever occur.
199 */
a0db00fc 200#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
bb4cc1a8 201#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
4e416953 202
8c7c6e34 203/* for encoding cft->private value on file */
204enum res_type {
205 _MEM,
206 _MEMSWAP,
207 _OOM_TYPE,
510fc4e1 208 _KMEM,
d55f90bf 209 _TCP,
210};
211
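/*
 * Descriptive note (added): cft->private packs a res_type in the upper
 * 16 bits and a resource attribute (e.g. the OOM_CONTROL value below)
 * in the lower 16 bits; the macros below encode and decode that pair.
 */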
212#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
213#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
8c7c6e34 214#define MEMFILE_ATTR(val) ((val) & 0xffff)
215/* Used for OOM notifier */
216#define OOM_CONTROL (0)
8c7c6e34 217
218/*
219 * Iteration constructs for visiting all cgroups (under a tree). If
220 * loops are exited prematurely (break), mem_cgroup_iter_break() must
221 * be used for reference counting.
222 */
223#define for_each_mem_cgroup_tree(iter, root) \
224 for (iter = mem_cgroup_iter(root, NULL, NULL); \
225 iter != NULL; \
226 iter = mem_cgroup_iter(root, iter, NULL))
227
228#define for_each_mem_cgroup(iter) \
229 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
230 iter != NULL; \
231 iter = mem_cgroup_iter(NULL, iter, NULL))
232
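/*
 * Descriptive note (added): charges should be forced through (bypassing
 * the limits) when the allocating task is already an OOM victim or is
 * exiting, so it can finish quickly and release its memory.
 */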
233static inline bool should_force_charge(void)
234{
235 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
236 (current->flags & PF_EXITING);
237}
238
239/* Some nice accessors for the vmpressure. */
240struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
241{
242 if (!memcg)
243 memcg = root_mem_cgroup;
244 return &memcg->vmpressure;
245}
246
247struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
248{
249 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
250}
251
84c07d11 252#ifdef CONFIG_MEMCG_KMEM
253extern spinlock_t css_set_lock;
254
255static void obj_cgroup_release(struct percpu_ref *ref)
256{
257 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
258 struct mem_cgroup *memcg;
259 unsigned int nr_bytes;
260 unsigned int nr_pages;
261 unsigned long flags;
262
263 /*
264 * At this point all allocated objects are freed, and
265 * objcg->nr_charged_bytes can't have an arbitrary byte value.
266 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
267 *
268 * The following sequence can lead to it:
269 * 1) CPU0: objcg == stock->cached_objcg
270 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
271 * PAGE_SIZE bytes are charged
272 * 3) CPU1: a process from another memcg is allocating something,
273 * the stock is flushed,
274 * objcg->nr_charged_bytes = PAGE_SIZE - 92
275 * 4) CPU0: we release this object,
276 * 92 bytes are added to stock->nr_bytes
277 * 5) CPU0: stock is flushed,
278 * 92 bytes are added to objcg->nr_charged_bytes
279 *
280 * As a result, nr_charged_bytes == PAGE_SIZE.
281 * This page will be uncharged in obj_cgroup_release().
282 */
283 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
284 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
285 nr_pages = nr_bytes >> PAGE_SHIFT;
286
287 spin_lock_irqsave(&css_set_lock, flags);
288 memcg = obj_cgroup_memcg(objcg);
289 if (nr_pages)
290 __memcg_kmem_uncharge(memcg, nr_pages);
291 list_del(&objcg->list);
292 mem_cgroup_put(memcg);
293 spin_unlock_irqrestore(&css_set_lock, flags);
294
295 percpu_ref_exit(ref);
296 kfree_rcu(objcg, rcu);
297}
298
299static struct obj_cgroup *obj_cgroup_alloc(void)
300{
301 struct obj_cgroup *objcg;
302 int ret;
303
304 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
305 if (!objcg)
306 return NULL;
307
308 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
309 GFP_KERNEL);
310 if (ret) {
311 kfree(objcg);
312 return NULL;
313 }
314 INIT_LIST_HEAD(&objcg->list);
315 return objcg;
316}
317
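/*
 * Descriptive note (added): reparent the obj_cgroups of @memcg to @parent
 * when @memcg goes offline, so outstanding object charges keep draining
 * into a live memory cgroup.
 */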
318static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
319 struct mem_cgroup *parent)
320{
321 struct obj_cgroup *objcg, *iter;
322
323 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
324
325 spin_lock_irq(&css_set_lock);
326
327 /* Move active objcg to the parent's list */
328 xchg(&objcg->memcg, parent);
329 css_get(&parent->css);
330 list_add(&objcg->list, &parent->objcg_list);
331
332 /* Move already reparented objcgs to the parent's list */
333 list_for_each_entry(iter, &memcg->objcg_list, list) {
334 css_get(&parent->css);
335 xchg(&iter->memcg, parent);
336 css_put(&memcg->css);
337 }
338 list_splice(&memcg->objcg_list, &parent->objcg_list);
339
340 spin_unlock_irq(&css_set_lock);
341
342 percpu_ref_kill(&objcg->refcnt);
343}
344
55007d84 345/*
9855609b 346 * This will be used as a shrinker list's index.
347 * The main reason for not using cgroup id for this:
348 * this works better in sparse environments, where we have a lot of memcgs,
349 * but only a few of them kmem-limited. Also, if we had, for instance, 200
350 * memcgs, and none but the 200th were kmem-limited, we would need a
351 * 200-entry array for that.
55007d84 352 *
353 * The current size of the caches array is stored in memcg_nr_cache_ids. It
354 * will double each time we have to increase it.
55007d84 355 */
356static DEFINE_IDA(memcg_cache_ida);
357int memcg_nr_cache_ids;
749c5415 358
359/* Protects memcg_nr_cache_ids */
360static DECLARE_RWSEM(memcg_cache_ids_sem);
361
362void memcg_get_cache_ids(void)
363{
364 down_read(&memcg_cache_ids_sem);
365}
366
367void memcg_put_cache_ids(void)
368{
369 up_read(&memcg_cache_ids_sem);
370}
371
372/*
373 * MIN_SIZE is different from 1, because we would like to avoid going through
374 * the alloc/free process all the time. In a small machine, 4 kmem-limited
375 * cgroups is a reasonable guess. In the future, it could be a parameter or
376 * tunable, but that is strictly not necessary.
377 *
b8627835 378 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
379 * this constant directly from cgroup, but it is understandable that this is
380 * better kept as an internal representation in cgroup.c. In any case, the
b8627835 381 * cgrp_id space is not getting any smaller, and we don't have to necessarily
382 * increase ours as well if it increases.
383 */
384#define MEMCG_CACHES_MIN_SIZE 4
b8627835 385#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
55007d84 386
387/*
388 * A lot of the calls to the cache allocation functions are expected to be
272911a4 389 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
390 * conditional to this static branch, we'll have to allow modules that do
391 * kmem_cache_alloc and the like to see this symbol as well
392 */
ef12947c 393DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
d7f25f8a 394EXPORT_SYMBOL(memcg_kmem_enabled_key);
0a432dcb 395#endif
17cc4dfe 396
397static int memcg_shrinker_map_size;
398static DEFINE_MUTEX(memcg_shrinker_map_mutex);
399
400static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
401{
402 kvfree(container_of(head, struct memcg_shrinker_map, rcu));
403}
404
405static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
406 int size, int old_size)
407{
408 struct memcg_shrinker_map *new, *old;
409 int nid;
410
411 lockdep_assert_held(&memcg_shrinker_map_mutex);
412
413 for_each_node(nid) {
414 old = rcu_dereference_protected(
415 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
416 /* Not yet online memcg */
417 if (!old)
418 return 0;
419
86daf94e 420 new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
421 if (!new)
422 return -ENOMEM;
423
424 /* Set all old bits, clear all new bits */
425 memset(new->map, (int)0xff, old_size);
426 memset((void *)new->map + old_size, 0, size - old_size);
427
428 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
429 call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
430 }
431
432 return 0;
433}
434
435static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
436{
437 struct mem_cgroup_per_node *pn;
438 struct memcg_shrinker_map *map;
439 int nid;
440
441 if (mem_cgroup_is_root(memcg))
442 return;
443
444 for_each_node(nid) {
445 pn = mem_cgroup_nodeinfo(memcg, nid);
446 map = rcu_dereference_protected(pn->shrinker_map, true);
447 if (map)
448 kvfree(map);
449 rcu_assign_pointer(pn->shrinker_map, NULL);
450 }
451}
452
453static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
454{
455 struct memcg_shrinker_map *map;
456 int nid, size, ret = 0;
457
458 if (mem_cgroup_is_root(memcg))
459 return 0;
460
461 mutex_lock(&memcg_shrinker_map_mutex);
462 size = memcg_shrinker_map_size;
463 for_each_node(nid) {
86daf94e 464 map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
465 if (!map) {
466 memcg_free_shrinker_maps(memcg);
467 ret = -ENOMEM;
468 break;
469 }
470 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
471 }
472 mutex_unlock(&memcg_shrinker_map_mutex);
473
474 return ret;
475}
476
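/*
 * Descriptive note (added): grow every memcg's per-node shrinker bitmap so
 * it can hold bit @new_id, copying the old bits over and publishing the new
 * map via RCU.
 */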
477int memcg_expand_shrinker_maps(int new_id)
478{
479 int size, old_size, ret = 0;
480 struct mem_cgroup *memcg;
481
482 size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
483 old_size = memcg_shrinker_map_size;
484 if (size <= old_size)
485 return 0;
486
487 mutex_lock(&memcg_shrinker_map_mutex);
488 if (!root_mem_cgroup)
489 goto unlock;
490
491 for_each_mem_cgroup(memcg) {
492 if (mem_cgroup_is_root(memcg))
493 continue;
494 ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
495 if (ret) {
496 mem_cgroup_iter_break(NULL, memcg);
0a4465d3 497 goto unlock;
75866af6 498 }
499 }
500unlock:
501 if (!ret)
502 memcg_shrinker_map_size = size;
503 mutex_unlock(&memcg_shrinker_map_mutex);
504 return ret;
505}
506
507void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
508{
509 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
510 struct memcg_shrinker_map *map;
511
512 rcu_read_lock();
513 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
514 /* Pairs with smp mb in shrink_slab() */
515 smp_mb__before_atomic();
516 set_bit(shrinker_id, map->map);
517 rcu_read_unlock();
518 }
519}
520
521/**
522 * mem_cgroup_css_from_page - css of the memcg associated with a page
523 * @page: page of interest
524 *
525 * If memcg is bound to the default hierarchy, css of the memcg associated
526 * with @page is returned. The returned css remains associated with @page
527 * until it is released.
528 *
529 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
530 * is returned.
531 */
532struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
533{
534 struct mem_cgroup *memcg;
535
536 memcg = page->mem_cgroup;
537
9e10a130 538 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
539 memcg = root_mem_cgroup;
540
541 return &memcg->css;
542}
543
544/**
545 * page_cgroup_ino - return inode number of the memcg a page is charged to
546 * @page: the page
547 *
548 * Look up the closest online ancestor of the memory cgroup @page is charged to
549 * and return its inode number or 0 if @page is not charged to any cgroup. It
550 * is safe to call this function without holding a reference to @page.
551 *
552 * Note, this function is inherently racy, because there is nothing to prevent
553 * the cgroup inode from getting torn down and potentially reallocated a moment
554 * after page_cgroup_ino() returns, so it only should be used by callers that
555 * do not care (such as procfs interfaces).
556 */
557ino_t page_cgroup_ino(struct page *page)
558{
559 struct mem_cgroup *memcg;
560 unsigned long ino = 0;
561
562 rcu_read_lock();
9855609b 563 memcg = page->mem_cgroup;
286e04b8 564
565 /*
566 * The lowest bit set means that memcg isn't a valid
567 * memcg pointer, but an obj_cgroups pointer.
568 * In this case the page is shared and doesn't belong
569 * to any specific memory cgroup.
570 */
571 if ((unsigned long) memcg & 0x1UL)
572 memcg = NULL;
286e04b8 573
574 while (memcg && !(memcg->css.flags & CSS_ONLINE))
575 memcg = parent_mem_cgroup(memcg);
576 if (memcg)
577 ino = cgroup_ino(memcg->css.cgroup);
578 rcu_read_unlock();
579 return ino;
580}
581
582static struct mem_cgroup_per_node *
583mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
f64c3f54 584{
97a6c37b 585 int nid = page_to_nid(page);
f64c3f54 586
ef8f2327 587 return memcg->nodeinfo[nid];
588}
589
590static struct mem_cgroup_tree_per_node *
591soft_limit_tree_node(int nid)
bb4cc1a8 592{
ef8f2327 593 return soft_limit_tree.rb_tree_per_node[nid];
594}
595
ef8f2327 596static struct mem_cgroup_tree_per_node *
597soft_limit_tree_from_page(struct page *page)
598{
599 int nid = page_to_nid(page);
bb4cc1a8 600
ef8f2327 601 return soft_limit_tree.rb_tree_per_node[nid];
602}
603
604static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
605 struct mem_cgroup_tree_per_node *mctz,
3e32cb2e 606 unsigned long new_usage_in_excess)
607{
608 struct rb_node **p = &mctz->rb_root.rb_node;
609 struct rb_node *parent = NULL;
ef8f2327 610 struct mem_cgroup_per_node *mz_node;
fa90b2fd 611 bool rightmost = true;
612
613 if (mz->on_tree)
614 return;
615
616 mz->usage_in_excess = new_usage_in_excess;
617 if (!mz->usage_in_excess)
618 return;
619 while (*p) {
620 parent = *p;
ef8f2327 621 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
bb4cc1a8 622 tree_node);
fa90b2fd 623 if (mz->usage_in_excess < mz_node->usage_in_excess) {
bb4cc1a8 624 p = &(*p)->rb_left;
625 rightmost = false;
626 }
627
628 /*
629 * We can't avoid mem cgroups that are over their soft
630 * limit by the same amount
631 */
632 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
633 p = &(*p)->rb_right;
634 }
635
636 if (rightmost)
637 mctz->rb_rightmost = &mz->tree_node;
638
639 rb_link_node(&mz->tree_node, parent, p);
640 rb_insert_color(&mz->tree_node, &mctz->rb_root);
641 mz->on_tree = true;
642}
643
644static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
645 struct mem_cgroup_tree_per_node *mctz)
646{
647 if (!mz->on_tree)
648 return;
fa90b2fd
DB
649
650 if (&mz->tree_node == mctz->rb_rightmost)
651 mctz->rb_rightmost = rb_prev(&mz->tree_node);
652
653 rb_erase(&mz->tree_node, &mctz->rb_root);
654 mz->on_tree = false;
655}
656
657static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
658 struct mem_cgroup_tree_per_node *mctz)
bb4cc1a8 659{
660 unsigned long flags;
661
662 spin_lock_irqsave(&mctz->lock, flags);
cf2c8127 663 __mem_cgroup_remove_exceeded(mz, mctz);
0a31bc97 664 spin_unlock_irqrestore(&mctz->lock, flags);
665}
666
667static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
668{
669 unsigned long nr_pages = page_counter_read(&memcg->memory);
4db0c3c2 670 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
671 unsigned long excess = 0;
672
673 if (nr_pages > soft_limit)
674 excess = nr_pages - soft_limit;
675
676 return excess;
677}
678
679static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
680{
3e32cb2e 681 unsigned long excess;
682 struct mem_cgroup_per_node *mz;
683 struct mem_cgroup_tree_per_node *mctz;
bb4cc1a8 684
e231875b 685 mctz = soft_limit_tree_from_page(page);
686 if (!mctz)
687 return;
688 /*
689 * Necessary to update all ancestors when hierarchy is used,
690 * because their event counter is not touched.
691 */
692 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
ef8f2327 693 mz = mem_cgroup_page_nodeinfo(memcg, page);
3e32cb2e 694 excess = soft_limit_excess(memcg);
695 /*
696 * We have to update the tree if mz is on RB-tree or
697 * mem is over its softlimit.
698 */
699 if (excess || mz->on_tree) {
700 unsigned long flags;
701
702 spin_lock_irqsave(&mctz->lock, flags);
703 /* if on-tree, remove it */
704 if (mz->on_tree)
cf2c8127 705 __mem_cgroup_remove_exceeded(mz, mctz);
706 /*
707 * Insert again. mz->usage_in_excess will be updated.
708 * If excess is 0, no tree ops.
709 */
cf2c8127 710 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 711 spin_unlock_irqrestore(&mctz->lock, flags);
712 }
713 }
714}
715
716static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
717{
718 struct mem_cgroup_tree_per_node *mctz;
719 struct mem_cgroup_per_node *mz;
720 int nid;
bb4cc1a8 721
e231875b 722 for_each_node(nid) {
723 mz = mem_cgroup_nodeinfo(memcg, nid);
724 mctz = soft_limit_tree_node(nid);
725 if (mctz)
726 mem_cgroup_remove_exceeded(mz, mctz);
727 }
728}
729
730static struct mem_cgroup_per_node *
731__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
bb4cc1a8 732{
ef8f2327 733 struct mem_cgroup_per_node *mz;
734
735retry:
736 mz = NULL;
fa90b2fd 737 if (!mctz->rb_rightmost)
738 goto done; /* Nothing to reclaim from */
739
740 mz = rb_entry(mctz->rb_rightmost,
741 struct mem_cgroup_per_node, tree_node);
742 /*
743 * Remove the node now but someone else can add it back,
744 * we will add it back at the end of reclaim to its correct
745 * position in the tree.
746 */
cf2c8127 747 __mem_cgroup_remove_exceeded(mz, mctz);
3e32cb2e 748 if (!soft_limit_excess(mz->memcg) ||
8965aa28 749 !css_tryget(&mz->memcg->css))
750 goto retry;
751done:
752 return mz;
753}
754
755static struct mem_cgroup_per_node *
756mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
bb4cc1a8 757{
ef8f2327 758 struct mem_cgroup_per_node *mz;
bb4cc1a8 759
0a31bc97 760 spin_lock_irq(&mctz->lock);
bb4cc1a8 761 mz = __mem_cgroup_largest_soft_limit_node(mctz);
0a31bc97 762 spin_unlock_irq(&mctz->lock);
763 return mz;
764}
765
766/**
767 * __mod_memcg_state - update cgroup memory statistics
768 * @memcg: the memory cgroup
769 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
770 * @val: delta to add to the counter, can be negative
771 */
772void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
773{
ea426c2a 774 long x, threshold = MEMCG_CHARGE_BATCH;
775
776 if (mem_cgroup_disabled())
777 return;
778
772616b0 779 if (memcg_stat_item_in_bytes(idx))
780 threshold <<= PAGE_SHIFT;
781
db9adbcb 782 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
ea426c2a 783 if (unlikely(abs(x) > threshold)) {
784 struct mem_cgroup *mi;
785
786 /*
787 * Batch local counters to keep them in sync with
788 * the hierarchical ones.
789 */
790 __this_cpu_add(memcg->vmstats_local->stat[idx], x);
791 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
792 atomic_long_add(x, &mi->vmstats[idx]);
793 x = 0;
794 }
795 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
796}
797
798static struct mem_cgroup_per_node *
799parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
800{
801 struct mem_cgroup *parent;
802
803 parent = parent_mem_cgroup(pn->memcg);
804 if (!parent)
805 return NULL;
806 return mem_cgroup_nodeinfo(parent, nid);
807}
808
809void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
810 int val)
811{
812 struct mem_cgroup_per_node *pn;
42a30035 813 struct mem_cgroup *memcg;
ea426c2a 814 long x, threshold = MEMCG_CHARGE_BATCH;
db9adbcb 815
db9adbcb 816 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
42a30035 817 memcg = pn->memcg;
818
819 /* Update memcg */
42a30035 820 __mod_memcg_state(memcg, idx, val);
db9adbcb 821
822 /* Update lruvec */
823 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
824
825 if (vmstat_item_in_bytes(idx))
826 threshold <<= PAGE_SHIFT;
827
db9adbcb 828 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
ea426c2a 829 if (unlikely(abs(x) > threshold)) {
eedc4e5a 830 pg_data_t *pgdat = lruvec_pgdat(lruvec);
831 struct mem_cgroup_per_node *pi;
832
833 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
834 atomic_long_add(x, &pi->lruvec_stat[idx]);
835 x = 0;
836 }
837 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
838}
839
840/**
841 * __mod_lruvec_state - update lruvec memory statistics
842 * @lruvec: the lruvec
843 * @idx: the stat item
844 * @val: delta to add to the counter, can be negative
845 *
846 * The lruvec is the intersection of the NUMA node and a cgroup. This
847 * function updates all three counters that are affected by a
848 * change of state at this level: per-node, per-cgroup, per-lruvec.
849 */
850void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
851 int val)
852{
853 /* Update node */
854 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
855
856 /* Update memcg and lruvec */
857 if (!mem_cgroup_disabled())
858 __mod_memcg_lruvec_state(lruvec, idx, val);
859}
860
861void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
862{
4f103c63 863 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
864 struct mem_cgroup *memcg;
865 struct lruvec *lruvec;
866
867 rcu_read_lock();
4f103c63 868 memcg = mem_cgroup_from_obj(p);
869
870 /* Untracked pages have no memcg, no lruvec. Update only the node */
871 if (!memcg || memcg == root_mem_cgroup) {
872 __mod_node_page_state(pgdat, idx, val);
873 } else {
867e5e1d 874 lruvec = mem_cgroup_lruvec(memcg, pgdat);
ec9f0238
RG
875 __mod_lruvec_state(lruvec, idx, val);
876 }
877 rcu_read_unlock();
878}
879
880void mod_memcg_obj_state(void *p, int idx, int val)
881{
882 struct mem_cgroup *memcg;
883
884 rcu_read_lock();
885 memcg = mem_cgroup_from_obj(p);
886 if (memcg)
887 mod_memcg_state(memcg, idx, val);
888 rcu_read_unlock();
889}
890
891/**
892 * __count_memcg_events - account VM events in a cgroup
893 * @memcg: the memory cgroup
894 * @idx: the event item
895 * @count: the number of events that occurred
896 */
897void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
898 unsigned long count)
899{
900 unsigned long x;
901
902 if (mem_cgroup_disabled())
903 return;
904
905 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
906 if (unlikely(x > MEMCG_CHARGE_BATCH)) {
907 struct mem_cgroup *mi;
908
909 /*
910 * Batch local counters to keep them in sync with
911 * the hierarchical ones.
912 */
913 __this_cpu_add(memcg->vmstats_local->events[idx], x);
914 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
915 atomic_long_add(x, &mi->vmevents[idx]);
916 x = 0;
917 }
918 __this_cpu_write(memcg->vmstats_percpu->events[idx], x);
919}
920
42a30035 921static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
e9f8974f 922{
871789d4 923 return atomic_long_read(&memcg->vmevents[event]);
924}
925
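/* Descriptive note (added): like memcg_events(), but summing only this memcg's local per-cpu counters. */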
926static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
927{
928 long x = 0;
929 int cpu;
930
931 for_each_possible_cpu(cpu)
932 x += per_cpu(memcg->vmstats_local->events[event], cpu);
933 return x;
934}
935
c0ff4b85 936static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
b070e65c 937 struct page *page,
3fba69a5 938 int nr_pages)
d52aa412 939{
940 /* pagein of a big page is an event. So, ignore page size */
941 if (nr_pages > 0)
c9019e9b 942 __count_memcg_events(memcg, PGPGIN, 1);
3751d604 943 else {
c9019e9b 944 __count_memcg_events(memcg, PGPGOUT, 1);
945 nr_pages = -nr_pages; /* for event */
946 }
e401f176 947
871789d4 948 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
949}
950
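/*
 * Descriptive note (added): returns true when enough page events have
 * accumulated since the last time @target fired, and arms the next threshold.
 */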
951static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
952 enum mem_cgroup_events_target target)
953{
954 unsigned long val, next;
955
956 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
957 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
7a159cc9 958 /* from time_after() in jiffies.h */
6a1a8b80 959 if ((long)(next - val) < 0) {
960 switch (target) {
961 case MEM_CGROUP_TARGET_THRESH:
962 next = val + THRESHOLDS_EVENTS_TARGET;
963 break;
964 case MEM_CGROUP_TARGET_SOFTLIMIT:
965 next = val + SOFTLIMIT_EVENTS_TARGET;
966 break;
967 default:
968 break;
969 }
871789d4 970 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
f53d7ce3 971 return true;
7a159cc9 972 }
f53d7ce3 973 return false;
974}
975
976/*
977 * Check events in order.
978 *
979 */
c0ff4b85 980static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
981{
982 /* threshold event is triggered in finer grain than soft limit */
f53d7ce3
JW
983 if (unlikely(mem_cgroup_event_ratelimit(memcg,
984 MEM_CGROUP_TARGET_THRESH))) {
bb4cc1a8 985 bool do_softlimit;
f53d7ce3 986
987 do_softlimit = mem_cgroup_event_ratelimit(memcg,
988 MEM_CGROUP_TARGET_SOFTLIMIT);
c0ff4b85 989 mem_cgroup_threshold(memcg);
990 if (unlikely(do_softlimit))
991 mem_cgroup_update_tree(memcg, page);
0a31bc97 992 }
993}
994
cf475ad2 995struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
78fb7466 996{
997 /*
998 * mm_update_next_owner() may clear mm->owner to NULL
999 * if it races with swapoff, page migration, etc.
1000 * So this can be called with p == NULL.
1001 */
1002 if (unlikely(!p))
1003 return NULL;
1004
073219e9 1005 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
78fb7466 1006}
33398cf2 1007EXPORT_SYMBOL(mem_cgroup_from_task);
78fb7466 1008
1009/**
1010 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1011 * @mm: mm from which memcg should be extracted. It can be NULL.
1012 *
1013 * Obtain a reference on mm->memcg and return it if successful. Otherwise
1014 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
1015 * returned.
1016 */
1017struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
54595fe2 1018{
1019 struct mem_cgroup *memcg;
1020
1021 if (mem_cgroup_disabled())
1022 return NULL;
0b7f569e 1023
1024 rcu_read_lock();
1025 do {
1026 /*
1027 * Page cache insertions can happen without an
1028 * actual mm context, e.g. during disk probing
1029 * on boot, loopback IO, acct() writes etc.
1030 */
1031 if (unlikely(!mm))
df381975 1032 memcg = root_mem_cgroup;
1033 else {
1034 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1035 if (unlikely(!memcg))
1036 memcg = root_mem_cgroup;
1037 }
00d484f3 1038 } while (!css_tryget(&memcg->css));
54595fe2 1039 rcu_read_unlock();
c0ff4b85 1040 return memcg;
54595fe2 1041}
1042EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1043
1044/**
1045 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
1046 * @page: page from which memcg should be extracted.
1047 *
1048 * Obtain a reference on page->memcg and return it if successful. Otherwise
1049 * root_mem_cgroup is returned.
1050 */
1051struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1052{
1053 struct mem_cgroup *memcg = page->mem_cgroup;
1054
1055 if (mem_cgroup_disabled())
1056 return NULL;
1057
1058 rcu_read_lock();
1059 /* Page should not get uncharged and freed memcg under us. */
1060 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
1061 memcg = root_mem_cgroup;
1062 rcu_read_unlock();
1063 return memcg;
1064}
1065EXPORT_SYMBOL(get_mem_cgroup_from_page);
1066
37d5985c 1067static __always_inline struct mem_cgroup *active_memcg(void)
d46eb14b 1068{
1069 if (in_interrupt())
1070 return this_cpu_read(int_active_memcg);
1071 else
1072 return current->active_memcg;
1073}
279c3393 1074
1075static __always_inline struct mem_cgroup *get_active_memcg(void)
1076{
1077 struct mem_cgroup *memcg;
d46eb14b 1078
1079 rcu_read_lock();
1080 memcg = active_memcg();
1081 if (memcg) {
8965aa28 1082 /* current->active_memcg must hold a ref. */
37d5985c 1083 if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
1084 memcg = root_mem_cgroup;
1085 else
d46eb14b 1086 memcg = current->active_memcg;
d46eb14b 1087 }
1088 rcu_read_unlock();
1089
1090 return memcg;
1091}
1092
1093static __always_inline bool memcg_kmem_bypass(void)
1094{
1095 /* Allow remote memcg charging from any context. */
1096 if (unlikely(active_memcg()))
1097 return false;
1098
1099 /* Memcg to charge can't be determined. */
1100 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
1101 return true;
1102
1103 return false;
1104}
1105
1106/**
1107 * If active memcg is set, do not fall back to current->mm->memcg.
1108 */
1109static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1110{
1111 if (memcg_kmem_bypass())
1112 return NULL;
1113
1114 if (unlikely(active_memcg()))
1115 return get_active_memcg();
1116
1117 return get_mem_cgroup_from_mm(current->mm);
1118}
54595fe2 1119
1120/**
1121 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1122 * @root: hierarchy root
1123 * @prev: previously returned memcg, NULL on first invocation
1124 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1125 *
1126 * Returns references to children of the hierarchy below @root, or
1127 * @root itself, or %NULL after a full round-trip.
1128 *
1129 * Caller must pass the return value in @prev on subsequent
1130 * invocations for reference counting, or use mem_cgroup_iter_break()
1131 * to cancel a hierarchy walk before the round-trip is complete.
1132 *
1133 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1134 * in the hierarchy among all concurrent reclaimers operating on the
1135 * same node.
5660048c 1136 */
694fbc0f 1137struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
5660048c 1138 struct mem_cgroup *prev,
694fbc0f 1139 struct mem_cgroup_reclaim_cookie *reclaim)
14067bb3 1140{
3f649ab7 1141 struct mem_cgroup_reclaim_iter *iter;
5ac8fb31 1142 struct cgroup_subsys_state *css = NULL;
9f3a0d09 1143 struct mem_cgroup *memcg = NULL;
5ac8fb31 1144 struct mem_cgroup *pos = NULL;
711d3d2c 1145
1146 if (mem_cgroup_disabled())
1147 return NULL;
5660048c 1148
1149 if (!root)
1150 root = root_mem_cgroup;
7d74b06f 1151
9f3a0d09 1152 if (prev && !reclaim)
5ac8fb31 1153 pos = prev;
14067bb3 1154
1155 if (!root->use_hierarchy && root != root_mem_cgroup) {
1156 if (prev)
5ac8fb31 1157 goto out;
694fbc0f 1158 return root;
9f3a0d09 1159 }
14067bb3 1160
542f85f9 1161 rcu_read_lock();
5f578161 1162
5ac8fb31 1163 if (reclaim) {
ef8f2327 1164 struct mem_cgroup_per_node *mz;
5ac8fb31 1165
ef8f2327 1166 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
9da83f3f 1167 iter = &mz->iter;
1168
1169 if (prev && reclaim->generation != iter->generation)
1170 goto out_unlock;
1171
6df38689 1172 while (1) {
4db0c3c2 1173 pos = READ_ONCE(iter->position);
1174 if (!pos || css_tryget(&pos->css))
1175 break;
5ac8fb31 1176 /*
1177 * css reference reached zero, so iter->position will
1178 * be cleared by ->css_released. However, we should not
1179 * rely on this happening soon, because ->css_released
1180 * is called from a work queue, and by busy-waiting we
1181 * might block it. So we clear iter->position right
1182 * away.
5ac8fb31 1183 */
1184 (void)cmpxchg(&iter->position, pos, NULL);
1185 }
1186 }
1187
1188 if (pos)
1189 css = &pos->css;
1190
1191 for (;;) {
1192 css = css_next_descendant_pre(css, &root->css);
1193 if (!css) {
1194 /*
1195 * Reclaimers share the hierarchy walk, and a
1196 * new one might jump in right at the end of
1197 * the hierarchy - make sure they see at least
1198 * one group and restart from the beginning.
1199 */
1200 if (!prev)
1201 continue;
1202 break;
527a5ec9 1203 }
7d74b06f 1204
1205 /*
1206 * Verify the css and acquire a reference. The root
1207 * is provided by the caller, so we know it's alive
1208 * and kicking, and don't take an extra reference.
1209 */
1210 memcg = mem_cgroup_from_css(css);
14067bb3 1211
1212 if (css == &root->css)
1213 break;
14067bb3 1214
1215 if (css_tryget(css))
1216 break;
9f3a0d09 1217
5ac8fb31 1218 memcg = NULL;
9f3a0d09 1219 }
1220
1221 if (reclaim) {
5ac8fb31 1222 /*
1223 * The position could have already been updated by a competing
1224 * thread, so check that the value hasn't changed since we read
1225 * it to avoid reclaiming from the same cgroup twice.
5ac8fb31 1226 */
1227 (void)cmpxchg(&iter->position, pos, memcg);
1228
1229 if (pos)
1230 css_put(&pos->css);
1231
1232 if (!memcg)
1233 iter->generation++;
1234 else if (!prev)
1235 reclaim->generation = iter->generation;
9f3a0d09 1236 }
5ac8fb31 1237
1238out_unlock:
1239 rcu_read_unlock();
5ac8fb31 1240out:
1241 if (prev && prev != root)
1242 css_put(&prev->css);
1243
9f3a0d09 1244 return memcg;
14067bb3 1245}
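/*
 * Added usage sketch (roughly what the for_each_mem_cgroup_tree() and
 * for_each_mem_cgroup() macros above do):
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	while (memcg) {
 *		if (done_early) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 *
 * "done_early" stands for any caller-specific early-exit condition.
 */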
7d74b06f 1246
1247/**
1248 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1249 * @root: hierarchy root
1250 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1251 */
1252void mem_cgroup_iter_break(struct mem_cgroup *root,
1253 struct mem_cgroup *prev)
1254{
1255 if (!root)
1256 root = root_mem_cgroup;
1257 if (prev && prev != root)
1258 css_put(&prev->css);
1259}
7d74b06f 1260
1261static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1262 struct mem_cgroup *dead_memcg)
6df38689 1263{
6df38689 1264 struct mem_cgroup_reclaim_iter *iter;
1265 struct mem_cgroup_per_node *mz;
1266 int nid;
6df38689 1267
1268 for_each_node(nid) {
1269 mz = mem_cgroup_nodeinfo(from, nid);
1270 iter = &mz->iter;
1271 cmpxchg(&iter->position, dead_memcg, NULL);
1272 }
1273}
1274
1275static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1276{
1277 struct mem_cgroup *memcg = dead_memcg;
1278 struct mem_cgroup *last;
1279
1280 do {
1281 __invalidate_reclaim_iterators(memcg, dead_memcg);
1282 last = memcg;
1283 } while ((memcg = parent_mem_cgroup(memcg)));
1284
1285 /*
1286 * When cgroup1 non-hierarchy mode is used,
1287 * parent_mem_cgroup() does not walk all the way up to the
1288 * cgroup root (root_mem_cgroup). So we have to handle
1289 * dead_memcg from cgroup root separately.
1290 */
1291 if (last != root_mem_cgroup)
1292 __invalidate_reclaim_iterators(root_mem_cgroup,
1293 dead_memcg);
1294}
1295
1296/**
1297 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1298 * @memcg: hierarchy root
1299 * @fn: function to call for each task
1300 * @arg: argument passed to @fn
1301 *
1302 * This function iterates over tasks attached to @memcg or to any of its
1303 * descendants and calls @fn for each task. If @fn returns a non-zero
1304 * value, the function breaks the iteration loop and returns the value.
1305 * Otherwise, it will iterate over all tasks and return 0.
1306 *
1307 * This function must not be called for the root memory cgroup.
1308 */
1309int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1310 int (*fn)(struct task_struct *, void *), void *arg)
1311{
1312 struct mem_cgroup *iter;
1313 int ret = 0;
1314
1315 BUG_ON(memcg == root_mem_cgroup);
1316
1317 for_each_mem_cgroup_tree(iter, memcg) {
1318 struct css_task_iter it;
1319 struct task_struct *task;
1320
f168a9a5 1321 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1322 while (!ret && (task = css_task_iter_next(&it)))
1323 ret = fn(task, arg);
1324 css_task_iter_end(&it);
1325 if (ret) {
1326 mem_cgroup_iter_break(memcg, iter);
1327 break;
1328 }
1329 }
1330 return ret;
1331}
1332
925b7673 1333/**
dfe0e773 1334 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
925b7673 1335 * @page: the page
f144c390 1336 * @pgdat: pgdat of the page
dfe0e773 1337 *
1338 * This function relies on page->mem_cgroup being stable - see the
1339 * access rules in commit_charge().
925b7673 1340 */
599d0c95 1341struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
08e552c6 1342{
ef8f2327 1343 struct mem_cgroup_per_node *mz;
925b7673 1344 struct mem_cgroup *memcg;
bea8c150 1345 struct lruvec *lruvec;
6d12e2d8 1346
bea8c150 1347 if (mem_cgroup_disabled()) {
867e5e1d 1348 lruvec = &pgdat->__lruvec;
1349 goto out;
1350 }
925b7673 1351
1306a85a 1352 memcg = page->mem_cgroup;
7512102c 1353 /*
dfe0e773 1354 * Swapcache readahead pages are added to the LRU - and
29833315 1355 * possibly migrated - before they are charged.
7512102c 1356 */
1357 if (!memcg)
1358 memcg = root_mem_cgroup;
7512102c 1359
ef8f2327 1360 mz = mem_cgroup_page_nodeinfo(memcg, page);
1361 lruvec = &mz->lruvec;
1362out:
1363 /*
1364 * Since a node can be onlined after the mem_cgroup was created,
1365 * we have to be prepared to initialize lruvec->zone here;
1366 * and if offlined then reonlined, we need to reinitialize it.
1367 */
1368 if (unlikely(lruvec->pgdat != pgdat))
1369 lruvec->pgdat = pgdat;
bea8c150 1370 return lruvec;
08e552c6 1371}
b69408e8 1372
925b7673 1373/**
1374 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1375 * @lruvec: mem_cgroup per zone lru vector
1376 * @lru: index of lru list the page is sitting on
b4536f0c 1377 * @zid: zone id of the accounted pages
fa9add64 1378 * @nr_pages: positive when adding or negative when removing
925b7673 1379 *
1380 * This function must be called under lru_lock, just before a page is added
1381 * to or just after a page is removed from an lru list (that ordering being
1382 * so as to allow it to check that lru_size 0 is consistent with list_empty).
3f58a829 1383 */
fa9add64 1384void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
b4536f0c 1385 int zid, int nr_pages)
3f58a829 1386{
ef8f2327 1387 struct mem_cgroup_per_node *mz;
fa9add64 1388 unsigned long *lru_size;
ca707239 1389 long size;
1390
1391 if (mem_cgroup_disabled())
1392 return;
1393
ef8f2327 1394 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
b4536f0c 1395 lru_size = &mz->lru_zone_size[zid][lru];
1396
1397 if (nr_pages < 0)
1398 *lru_size += nr_pages;
1399
1400 size = *lru_size;
1401 if (WARN_ONCE(size < 0,
1402 "%s(%p, %d, %d): lru_size %ld\n",
1403 __func__, lruvec, lru, nr_pages, size)) {
1404 VM_BUG_ON(1);
1405 *lru_size = 0;
1406 }
1407
1408 if (nr_pages > 0)
1409 *lru_size += nr_pages;
08e552c6 1410}
544122e5 1411
19942822 1412/**
9d11ea9f 1413 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
dad7557e 1414 * @memcg: the memory cgroup
19942822 1415 *
9d11ea9f 1416 * Returns the maximum amount of memory @mem can be charged with, in
7ec99d62 1417 * pages.
19942822 1418 */
c0ff4b85 1419static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
19942822 1420{
1421 unsigned long margin = 0;
1422 unsigned long count;
1423 unsigned long limit;
9d11ea9f 1424
3e32cb2e 1425 count = page_counter_read(&memcg->memory);
bbec2e15 1426 limit = READ_ONCE(memcg->memory.max);
3e32cb2e
JW
1427 if (count < limit)
1428 margin = limit - count;
1429
7941d214 1430 if (do_memsw_account()) {
3e32cb2e 1431 count = page_counter_read(&memcg->memsw);
bbec2e15 1432 limit = READ_ONCE(memcg->memsw.max);
1c4448ed 1433 if (count < limit)
3e32cb2e 1434 margin = min(margin, limit - count);
cbedbac3
LR
1435 else
1436 margin = 0;
3e32cb2e
JW
1437 }
1438
1439 return margin;
1440}
1441
32047e2a 1442/*
bdcbb659 1443 * A routine for checking "mem" is under move_account() or not.
32047e2a 1444 *
1445 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1446 * moving cgroups. This is for waiting at high-memory pressure
1447 * caused by "move".
32047e2a 1448 */
c0ff4b85 1449static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
4b534334 1450{
1451 struct mem_cgroup *from;
1452 struct mem_cgroup *to;
4b534334 1453 bool ret = false;
1454 /*
1455 * Unlike task_move routines, we access mc.to, mc.from not under
1456 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1457 */
1458 spin_lock(&mc.lock);
1459 from = mc.from;
1460 to = mc.to;
1461 if (!from)
1462 goto unlock;
3e92041d 1463
1464 ret = mem_cgroup_is_descendant(from, memcg) ||
1465 mem_cgroup_is_descendant(to, memcg);
1466unlock:
1467 spin_unlock(&mc.lock);
1468 return ret;
1469}
1470
c0ff4b85 1471static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1472{
1473 if (mc.moving_task && current != mc.moving_task) {
c0ff4b85 1474 if (mem_cgroup_under_move(memcg)) {
4b534334
KH
1475 DEFINE_WAIT(wait);
1476 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1477 /* moving charge context might have finished. */
1478 if (mc.moving_task)
1479 schedule();
1480 finish_wait(&mc.waitq, &wait);
1481 return true;
1482 }
1483 }
1484 return false;
1485}
1486
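/*
 * Descriptive note (added): table driving memory.stat output. @idx is the
 * internal counter, @name its user-visible label, and @ratio the factor
 * converting the counter's native unit (pages, KB or bytes) to bytes.
 */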
1487struct memory_stat {
1488 const char *name;
1489 unsigned int ratio;
1490 unsigned int idx;
1491};
1492
1493static struct memory_stat memory_stats[] = {
1494 { "anon", PAGE_SIZE, NR_ANON_MAPPED },
1495 { "file", PAGE_SIZE, NR_FILE_PAGES },
1496 { "kernel_stack", 1024, NR_KERNEL_STACK_KB },
1497 { "percpu", 1, MEMCG_PERCPU_B },
1498 { "sock", PAGE_SIZE, MEMCG_SOCK },
1499 { "shmem", PAGE_SIZE, NR_SHMEM },
1500 { "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
1501 { "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
1502 { "file_writeback", PAGE_SIZE, NR_WRITEBACK },
1503#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1504 /*
1505 * The ratio will be initialized in memory_stats_init(), because
1506 * on some architectures the HPAGE_PMD_SIZE macro is not a
1507 * constant (e.g. powerpc).
1508 */
1509 { "anon_thp", 0, NR_ANON_THPS },
1510#endif
1511 { "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
1512 { "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
1513 { "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
1514 { "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
1515 { "unevictable", PAGE_SIZE, NR_UNEVICTABLE },
1516
1517 /*
1518 * Note: The slab_reclaimable and slab_unreclaimable must be
1519 * together and slab_reclaimable must be in front.
1520 */
1521 { "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
1522 { "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },
1523
1524 /* The memory events */
1525 { "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON },
1526 { "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE },
1527 { "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON },
1528 { "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE },
1529 { "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON },
1530 { "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE },
1531 { "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
1532};
1533
1534static int __init memory_stats_init(void)
1535{
1536 int i;
1537
1538 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1539#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1540 if (memory_stats[i].idx == NR_ANON_THPS)
1541 memory_stats[i].ratio = HPAGE_PMD_SIZE;
1542#endif
1543 VM_BUG_ON(!memory_stats[i].ratio);
1544 VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
1545 }
1546
1547 return 0;
1548}
1549pure_initcall(memory_stats_init);
1550
1551static char *memory_stat_format(struct mem_cgroup *memcg)
1552{
1553 struct seq_buf s;
1554 int i;
71cd3113 1555
1556 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1557 if (!s.buffer)
1558 return NULL;
1559
1560 /*
1561 * Provide statistics on the state of the memory subsystem as
1562 * well as cumulative event counters that show past behavior.
1563 *
1564 * This list is ordered following a combination of these gradients:
1565 * 1) generic big picture -> specifics and details
1566 * 2) reflecting userspace activity -> reflecting kernel heuristics
1567 *
1568 * Current memory state:
1569 */
1570
1571 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1572 u64 size;
c8713d0b 1573
1574 size = memcg_page_state(memcg, memory_stats[i].idx);
1575 size *= memory_stats[i].ratio;
1576 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
c8713d0b 1577
1578 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1579 size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
1580 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
1581 seq_buf_printf(&s, "slab %llu\n", size);
1582 }
1583 }
1584
1585 /* Accumulated memory events */
1586
1587 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1588 memcg_events(memcg, PGFAULT));
1589 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1590 memcg_events(memcg, PGMAJFAULT));
1591 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
1592 memcg_events(memcg, PGREFILL));
1593 seq_buf_printf(&s, "pgscan %lu\n",
1594 memcg_events(memcg, PGSCAN_KSWAPD) +
1595 memcg_events(memcg, PGSCAN_DIRECT));
1596 seq_buf_printf(&s, "pgsteal %lu\n",
1597 memcg_events(memcg, PGSTEAL_KSWAPD) +
1598 memcg_events(memcg, PGSTEAL_DIRECT));
1599 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1600 memcg_events(memcg, PGACTIVATE));
1601 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1602 memcg_events(memcg, PGDEACTIVATE));
1603 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1604 memcg_events(memcg, PGLAZYFREE));
1605 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1606 memcg_events(memcg, PGLAZYFREED));
1607
1608#ifdef CONFIG_TRANSPARENT_HUGEPAGE
ebc5d83d 1609 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
c8713d0b 1610 memcg_events(memcg, THP_FAULT_ALLOC));
ebc5d83d 1611 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1612 memcg_events(memcg, THP_COLLAPSE_ALLOC));
1613#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1614
1615 /* The above should easily fit into one page */
1616 WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1617
1618 return s.buffer;
1619}
71cd3113 1620
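/* Descriptive note (added): convert a number of pages to kilobytes for the OOM report below. */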
58cf188e 1621#define K(x) ((x) << (PAGE_SHIFT-10))
e222432b 1622/**
f0c867d9 1623 * mem_cgroup_print_oom_context: Print OOM information relevant to
1624 * memory controller.
1625 * @memcg: The memory cgroup that went over limit
1626 * @p: Task that is going to be killed
1627 *
1628 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1629 * enabled
1630 */
f0c867d9 1631void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
e222432b 1632{
1633 rcu_read_lock();
1634
f0c867d9 1635 if (memcg) {
1636 pr_cont(",oom_memcg=");
1637 pr_cont_cgroup_path(memcg->css.cgroup);
1638 } else
1639 pr_cont(",global_oom");
2415b9f5 1640 if (p) {
f0c867d9 1641 pr_cont(",task_memcg=");
2415b9f5 1642 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
2415b9f5 1643 }
e222432b 1644 rcu_read_unlock();
f0c867d9 1645}
1646
1647/**
1648 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1649 * memory controller.
1650 * @memcg: The memory cgroup that went over limit
1651 */
1652void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1653{
c8713d0b 1654 char *buf;
e222432b 1655
1656 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1657 K((u64)page_counter_read(&memcg->memory)),
15b42562 1658 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1659 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1660 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1661 K((u64)page_counter_read(&memcg->swap)),
32d087cd 1662 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1663 else {
1664 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1665 K((u64)page_counter_read(&memcg->memsw)),
1666 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1667 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1668 K((u64)page_counter_read(&memcg->kmem)),
1669 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
58cf188e 1670 }
1671
1672 pr_info("Memory cgroup stats for ");
1673 pr_cont_cgroup_path(memcg->css.cgroup);
1674 pr_cont(":");
1675 buf = memory_stat_format(memcg);
1676 if (!buf)
1677 return;
1678 pr_info("%s", buf);
1679 kfree(buf);
1680}
1681
1682/*
1683 * Return the memory (and swap, if configured) limit for a memcg.
1684 */
bbec2e15 1685unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
a63d83f4 1686{
1687 unsigned long max = READ_ONCE(memcg->memory.max);
1688
1689 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1690 if (mem_cgroup_swappiness(memcg))
1691 max += min(READ_ONCE(memcg->swap.max),
1692 (unsigned long)total_swap_pages);
1693 } else { /* v1 */
1694 if (mem_cgroup_swappiness(memcg)) {
1695 /* Calculate swap excess capacity from memsw limit */
1696 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1697
1698 max += min(swap, (unsigned long)total_swap_pages);
1699 }
9a5a8f19 1700 }
bbec2e15 1701 return max;
1702}
1703
1704unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1705{
1706 return page_counter_read(&memcg->memory);
1707}
1708
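/*
 * Descriptive note (added, a summary of the logic below): invoke the OOM
 * killer on behalf of an over-limit @memcg; returns true when the caller may
 * retry the charge (the margin recovered, a victim was chosen, or the
 * charging task is itself dying).
 */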
b6e6edcf 1709static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
19965460 1710 int order)
9cbb78bb 1711{
6e0fc46d
DR
1712 struct oom_control oc = {
1713 .zonelist = NULL,
1714 .nodemask = NULL,
2a966b77 1715 .memcg = memcg,
6e0fc46d
DR
1716 .gfp_mask = gfp_mask,
1717 .order = order,
6e0fc46d 1718 };
1378b37d 1719 bool ret = true;
9cbb78bb 1720
7775face
TH
1721 if (mutex_lock_killable(&oom_lock))
1722 return true;
1378b37d
YS
1723
1724 if (mem_cgroup_margin(memcg) >= (1 << order))
1725 goto unlock;
1726
7775face
TH
1727 /*
1728 * A few threads which were not waiting at mutex_lock_killable() can
1729 * fail to bail out. Therefore, check again after holding oom_lock.
1730 */
1731 ret = should_force_charge() || out_of_memory(&oc);
1378b37d
YS
1732
1733unlock:
dc56401f 1734 mutex_unlock(&oom_lock);
7c5f64f8 1735 return ret;
9cbb78bb
DR
1736}
1737
0608f43d 1738static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
ef8f2327 1739 pg_data_t *pgdat,
0608f43d
AM
1740 gfp_t gfp_mask,
1741 unsigned long *total_scanned)
1742{
1743 struct mem_cgroup *victim = NULL;
1744 int total = 0;
1745 int loop = 0;
1746 unsigned long excess;
1747 unsigned long nr_scanned;
1748 struct mem_cgroup_reclaim_cookie reclaim = {
ef8f2327 1749 .pgdat = pgdat,
0608f43d
AM
1750 };
1751
3e32cb2e 1752 excess = soft_limit_excess(root_memcg);
0608f43d
AM
1753
1754 while (1) {
1755 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1756 if (!victim) {
1757 loop++;
1758 if (loop >= 2) {
1759 /*
1760 * If we have not been able to reclaim
1761 				 * anything, it might be because there are
1762 * no reclaimable pages under this hierarchy
1763 */
1764 if (!total)
1765 break;
1766 /*
1767 * We want to do more targeted reclaim.
1768 			 * excess >> 2 is not too excessive, so we do not
1769 			 * reclaim too much, nor too little such that we keep
1770 			 * coming back to reclaim from this cgroup
1771 */
1772 if (total >= (excess >> 2) ||
1773 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1774 break;
1775 }
1776 continue;
1777 }
a9dd0a83 1778 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
ef8f2327 1779 pgdat, &nr_scanned);
0608f43d 1780 *total_scanned += nr_scanned;
3e32cb2e 1781 if (!soft_limit_excess(root_memcg))
0608f43d 1782 break;
6d61ef40 1783 }
0608f43d
AM
1784 mem_cgroup_iter_break(root_memcg, victim);
1785 return total;
6d61ef40
BS
1786}
1787
0056f4e6
JW
1788#ifdef CONFIG_LOCKDEP
1789static struct lockdep_map memcg_oom_lock_dep_map = {
1790 .name = "memcg_oom_lock",
1791};
1792#endif
1793
fb2a6fc5
JW
1794static DEFINE_SPINLOCK(memcg_oom_lock);
1795
867578cb
KH
1796/*
1797 * Check OOM-Killer is already running under our hierarchy.
1798 * If someone is running, return false.
1799 */
fb2a6fc5 1800static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
867578cb 1801{
79dfdacc 1802 struct mem_cgroup *iter, *failed = NULL;
a636b327 1803
fb2a6fc5
JW
1804 spin_lock(&memcg_oom_lock);
1805
9f3a0d09 1806 for_each_mem_cgroup_tree(iter, memcg) {
23751be0 1807 if (iter->oom_lock) {
79dfdacc
MH
1808 /*
1809 * this subtree of our hierarchy is already locked
1810 			 * so we cannot acquire the lock.
1811 */
79dfdacc 1812 failed = iter;
9f3a0d09
JW
1813 mem_cgroup_iter_break(memcg, iter);
1814 break;
23751be0
JW
1815 } else
1816 iter->oom_lock = true;
7d74b06f 1817 }
867578cb 1818
fb2a6fc5
JW
1819 if (failed) {
1820 /*
1821 * OK, we failed to lock the whole subtree so we have
1822 		 * to clean up what we have set up, up to the failing subtree
1823 */
1824 for_each_mem_cgroup_tree(iter, memcg) {
1825 if (iter == failed) {
1826 mem_cgroup_iter_break(memcg, iter);
1827 break;
1828 }
1829 iter->oom_lock = false;
79dfdacc 1830 }
0056f4e6
JW
1831 } else
1832 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
fb2a6fc5
JW
1833
1834 spin_unlock(&memcg_oom_lock);
1835
1836 return !failed;
a636b327 1837}
0b7f569e 1838
fb2a6fc5 1839static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
0b7f569e 1840{
7d74b06f
KH
1841 struct mem_cgroup *iter;
1842
fb2a6fc5 1843 spin_lock(&memcg_oom_lock);
5facae4f 1844 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
c0ff4b85 1845 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc 1846 iter->oom_lock = false;
fb2a6fc5 1847 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
1848}
1849
c0ff4b85 1850static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1851{
1852 struct mem_cgroup *iter;
1853
c2b42d3c 1854 spin_lock(&memcg_oom_lock);
c0ff4b85 1855 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
1856 iter->under_oom++;
1857 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
1858}
1859
c0ff4b85 1860static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1861{
1862 struct mem_cgroup *iter;
1863
867578cb 1864 /*
7a52d4d8
ML
1865 	 * Be careful about under_oom underflows because a child memcg
1866 * could have been added after mem_cgroup_mark_under_oom.
867578cb 1867 */
c2b42d3c 1868 spin_lock(&memcg_oom_lock);
c0ff4b85 1869 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
1870 if (iter->under_oom > 0)
1871 iter->under_oom--;
1872 spin_unlock(&memcg_oom_lock);
0b7f569e
KH
1873}
1874
867578cb
KH
1875static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1876
dc98df5a 1877struct oom_wait_info {
d79154bb 1878 struct mem_cgroup *memcg;
ac6424b9 1879 wait_queue_entry_t wait;
dc98df5a
KH
1880};
1881
ac6424b9 1882static int memcg_oom_wake_function(wait_queue_entry_t *wait,
dc98df5a
KH
1883 unsigned mode, int sync, void *arg)
1884{
d79154bb
HD
1885 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1886 struct mem_cgroup *oom_wait_memcg;
dc98df5a
KH
1887 struct oom_wait_info *oom_wait_info;
1888
1889 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
d79154bb 1890 oom_wait_memcg = oom_wait_info->memcg;
dc98df5a 1891
2314b42d
JW
1892 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1893 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
dc98df5a 1894 return 0;
dc98df5a
KH
1895 return autoremove_wake_function(wait, mode, sync, arg);
1896}
1897
c0ff4b85 1898static void memcg_oom_recover(struct mem_cgroup *memcg)
3c11ecf4 1899{
c2b42d3c
TH
1900 /*
1901 * For the following lockless ->under_oom test, the only required
1902 * guarantee is that it must see the state asserted by an OOM when
1903 * this function is called as a result of userland actions
1904 * triggered by the notification of the OOM. This is trivially
1905 * achieved by invoking mem_cgroup_mark_under_oom() before
1906 * triggering notification.
1907 */
1908 if (memcg && memcg->under_oom)
f4b90b70 1909 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
3c11ecf4
KH
1910}
1911
29ef680a
MH
1912enum oom_status {
1913 OOM_SUCCESS,
1914 OOM_FAILED,
1915 OOM_ASYNC,
1916 OOM_SKIPPED
1917};
1918
1919static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
0b7f569e 1920{
7056d3a3
MH
1921 enum oom_status ret;
1922 bool locked;
1923
29ef680a
MH
1924 if (order > PAGE_ALLOC_COSTLY_ORDER)
1925 return OOM_SKIPPED;
1926
7a1adfdd
RG
1927 memcg_memory_event(memcg, MEMCG_OOM);
1928
867578cb 1929 /*
49426420
JW
1930 * We are in the middle of the charge context here, so we
1931 * don't want to block when potentially sitting on a callstack
1932 * that holds all kinds of filesystem and mm locks.
1933 *
29ef680a
MH
1934 * cgroup1 allows disabling the OOM killer and waiting for outside
1935 * handling until the charge can succeed; remember the context and put
1936 * the task to sleep at the end of the page fault when all locks are
1937 * released.
49426420 1938 *
29ef680a
MH
1939 * On the other hand, in-kernel OOM killer allows for an async victim
1940 * memory reclaim (oom_reaper) and that means that we are not solely
1941 	 * relying on the oom victim to make forward progress and we can
1942 * invoke the oom killer here.
1943 *
1944 * Please note that mem_cgroup_out_of_memory might fail to find a
1945 * victim and then we have to bail out from the charge path.
867578cb 1946 */
29ef680a
MH
1947 if (memcg->oom_kill_disable) {
1948 if (!current->in_user_fault)
1949 return OOM_SKIPPED;
1950 css_get(&memcg->css);
1951 current->memcg_in_oom = memcg;
1952 current->memcg_oom_gfp_mask = mask;
1953 current->memcg_oom_order = order;
1954
1955 return OOM_ASYNC;
1956 }
1957
7056d3a3
MH
1958 mem_cgroup_mark_under_oom(memcg);
1959
1960 locked = mem_cgroup_oom_trylock(memcg);
1961
1962 if (locked)
1963 mem_cgroup_oom_notify(memcg);
1964
1965 mem_cgroup_unmark_under_oom(memcg);
29ef680a 1966 if (mem_cgroup_out_of_memory(memcg, mask, order))
7056d3a3
MH
1967 ret = OOM_SUCCESS;
1968 else
1969 ret = OOM_FAILED;
1970
1971 if (locked)
1972 mem_cgroup_oom_unlock(memcg);
29ef680a 1973
7056d3a3 1974 return ret;
3812c8c8
JW
1975}
1976
1977/**
1978 * mem_cgroup_oom_synchronize - complete memcg OOM handling
49426420 1979 * @handle: actually kill/wait or just clean up the OOM state
3812c8c8 1980 *
49426420
JW
1981 * This has to be called at the end of a page fault if the memcg OOM
1982 * handler was enabled.
3812c8c8 1983 *
49426420 1984 * Memcg supports userspace OOM handling where failed allocations must
3812c8c8
JW
1985 * sleep on a waitqueue until the userspace task resolves the
1986 * situation. Sleeping directly in the charge context with all kinds
1987 * of locks held is not a good idea; instead we remember an OOM state
1988 * in the task and mem_cgroup_oom_synchronize() has to be called at
49426420 1989 * the end of the page fault to complete the OOM handling.
3812c8c8
JW
1990 *
1991 * Returns %true if an ongoing memcg OOM situation was detected and
49426420 1992 * completed, %false otherwise.
3812c8c8 1993 */
49426420 1994bool mem_cgroup_oom_synchronize(bool handle)
3812c8c8 1995{
626ebc41 1996 struct mem_cgroup *memcg = current->memcg_in_oom;
3812c8c8 1997 struct oom_wait_info owait;
49426420 1998 bool locked;
3812c8c8
JW
1999
2000 /* OOM is global, do not handle */
3812c8c8 2001 if (!memcg)
49426420 2002 return false;
3812c8c8 2003
7c5f64f8 2004 if (!handle)
49426420 2005 goto cleanup;
3812c8c8
JW
2006
2007 owait.memcg = memcg;
2008 owait.wait.flags = 0;
2009 owait.wait.func = memcg_oom_wake_function;
2010 owait.wait.private = current;
2055da97 2011 INIT_LIST_HEAD(&owait.wait.entry);
867578cb 2012
3812c8c8 2013 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
49426420
JW
2014 mem_cgroup_mark_under_oom(memcg);
2015
2016 locked = mem_cgroup_oom_trylock(memcg);
2017
2018 if (locked)
2019 mem_cgroup_oom_notify(memcg);
2020
2021 if (locked && !memcg->oom_kill_disable) {
2022 mem_cgroup_unmark_under_oom(memcg);
2023 finish_wait(&memcg_oom_waitq, &owait.wait);
626ebc41
TH
2024 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
2025 current->memcg_oom_order);
49426420 2026 } else {
3812c8c8 2027 schedule();
49426420
JW
2028 mem_cgroup_unmark_under_oom(memcg);
2029 finish_wait(&memcg_oom_waitq, &owait.wait);
2030 }
2031
2032 if (locked) {
fb2a6fc5
JW
2033 mem_cgroup_oom_unlock(memcg);
2034 /*
2035 * There is no guarantee that an OOM-lock contender
2036 * sees the wakeups triggered by the OOM kill
2037 		 * uncharges. Wake any sleepers explicitly.
2038 */
2039 memcg_oom_recover(memcg);
2040 }
49426420 2041cleanup:
626ebc41 2042 current->memcg_in_oom = NULL;
3812c8c8 2043 css_put(&memcg->css);
867578cb 2044 return true;
0b7f569e
KH
2045}
2046
3d8b38eb
RG
2047/**
2048 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2049 * @victim: task to be killed by the OOM killer
2050 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2051 *
2052 * Returns a pointer to a memory cgroup, which has to be cleaned up
2053 * by killing all belonging OOM-killable tasks.
2054 *
2055 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2056 */
2057struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2058 struct mem_cgroup *oom_domain)
2059{
2060 struct mem_cgroup *oom_group = NULL;
2061 struct mem_cgroup *memcg;
2062
2063 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2064 return NULL;
2065
2066 if (!oom_domain)
2067 oom_domain = root_mem_cgroup;
2068
2069 rcu_read_lock();
2070
2071 memcg = mem_cgroup_from_task(victim);
2072 if (memcg == root_mem_cgroup)
2073 goto out;
2074
48fe267c
RG
2075 /*
2076 * If the victim task has been asynchronously moved to a different
2077 * memory cgroup, we might end up killing tasks outside oom_domain.
2078 * In this case it's better to ignore memory.group.oom.
2079 */
2080 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2081 goto out;
2082
3d8b38eb
RG
2083 /*
2084 * Traverse the memory cgroup hierarchy from the victim task's
2085 * cgroup up to the OOMing cgroup (or root) to find the
2086 * highest-level memory cgroup with oom.group set.
2087 */
2088 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2089 if (memcg->oom_group)
2090 oom_group = memcg;
2091
2092 if (memcg == oom_domain)
2093 break;
2094 }
2095
2096 if (oom_group)
2097 css_get(&oom_group->css);
2098out:
2099 rcu_read_unlock();
2100
2101 return oom_group;
2102}
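/*
 * Example of the resulting semantics (illustrative only): if /A/B has
 * memory.oom.group set and a task in /A/B/C is chosen as the OOM victim
 * for an OOM scoped to /A, this returns /A/B, and the OOM killer then
 * kills every killable task in /A/B and its descendants rather than the
 * original victim alone.
 */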
2103
2104void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2105{
2106 pr_info("Tasks in ");
2107 pr_cont_cgroup_path(memcg->css.cgroup);
2108 pr_cont(" are going to be killed due to memory.oom.group set\n");
2109}
2110
d7365e78 2111/**
81f8c3a4
JW
2112 * lock_page_memcg - lock a page->mem_cgroup binding
2113 * @page: the page
32047e2a 2114 *
81f8c3a4 2115 * This function protects unlocked LRU pages from being moved to
739f79fc
JW
2116 * another cgroup.
2117 *
2118 * It ensures lifetime of the returned memcg. Caller is responsible
2119 * for the lifetime of the page; __unlock_page_memcg() is available
2120 * when @page might get freed inside the locked section.
d69b042f 2121 */
739f79fc 2122struct mem_cgroup *lock_page_memcg(struct page *page)
89c06bd5 2123{
9da7b521 2124 struct page *head = compound_head(page); /* rmap on tail pages */
89c06bd5 2125 struct mem_cgroup *memcg;
6de22619 2126 unsigned long flags;
89c06bd5 2127
6de22619
JW
2128 /*
2129 * The RCU lock is held throughout the transaction. The fast
2130 * path can get away without acquiring the memcg->move_lock
2131 * because page moving starts with an RCU grace period.
739f79fc
JW
2132 *
2133 * The RCU lock also protects the memcg from being freed when
2134 * the page state that is going to change is the only thing
2135 * preventing the page itself from being freed. E.g. writeback
2136 * doesn't hold a page reference and relies on PG_writeback to
2137 * keep off truncation, migration and so forth.
2138 */
d7365e78
JW
2139 rcu_read_lock();
2140
2141 if (mem_cgroup_disabled())
739f79fc 2142 return NULL;
89c06bd5 2143again:
9da7b521 2144 memcg = head->mem_cgroup;
29833315 2145 if (unlikely(!memcg))
739f79fc 2146 return NULL;
d7365e78 2147
bdcbb659 2148 if (atomic_read(&memcg->moving_account) <= 0)
739f79fc 2149 return memcg;
89c06bd5 2150
6de22619 2151 spin_lock_irqsave(&memcg->move_lock, flags);
9da7b521 2152 if (memcg != head->mem_cgroup) {
6de22619 2153 spin_unlock_irqrestore(&memcg->move_lock, flags);
89c06bd5
KH
2154 goto again;
2155 }
6de22619
JW
2156
2157 /*
2158 * When charge migration first begins, we can have locked and
2159 * unlocked page stat updates happening concurrently. Track
81f8c3a4 2160 * the task who has the lock for unlock_page_memcg().
6de22619
JW
2161 */
2162 memcg->move_lock_task = current;
2163 memcg->move_lock_flags = flags;
d7365e78 2164
739f79fc 2165 return memcg;
89c06bd5 2166}
81f8c3a4 2167EXPORT_SYMBOL(lock_page_memcg);
89c06bd5 2168
d7365e78 2169/**
739f79fc
JW
2170 * __unlock_page_memcg - unlock and unpin a memcg
2171 * @memcg: the memcg
2172 *
2173 * Unlock and unpin a memcg returned by lock_page_memcg().
d7365e78 2174 */
739f79fc 2175void __unlock_page_memcg(struct mem_cgroup *memcg)
89c06bd5 2176{
6de22619
JW
2177 if (memcg && memcg->move_lock_task == current) {
2178 unsigned long flags = memcg->move_lock_flags;
2179
2180 memcg->move_lock_task = NULL;
2181 memcg->move_lock_flags = 0;
2182
2183 spin_unlock_irqrestore(&memcg->move_lock, flags);
2184 }
89c06bd5 2185
d7365e78 2186 rcu_read_unlock();
89c06bd5 2187}
739f79fc
JW
2188
2189/**
2190 * unlock_page_memcg - unlock a page->mem_cgroup binding
2191 * @page: the page
2192 */
2193void unlock_page_memcg(struct page *page)
2194{
9da7b521
JW
2195 struct page *head = compound_head(page);
2196
2197 __unlock_page_memcg(head->mem_cgroup);
739f79fc 2198}
81f8c3a4 2199EXPORT_SYMBOL(unlock_page_memcg);
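/*
 * Typical calling pattern (an illustrative sketch, not a verbatim caller):
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state that is accounted to the memcg ...
 *	__unlock_page_memcg(memcg);
 *
 * unlock_page_memcg(page) is the common case; the __unlock_page_memcg()
 * variant exists for callers whose page may already have been freed
 * inside the locked section, as noted in the kerneldoc above.
 */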
89c06bd5 2200
cdec2e42
KH
2201struct memcg_stock_pcp {
2202 	struct mem_cgroup *cached; /* must never be the root cgroup */
11c9ea4e 2203 unsigned int nr_pages;
bf4f0599
RG
2204
2205#ifdef CONFIG_MEMCG_KMEM
2206 struct obj_cgroup *cached_objcg;
2207 unsigned int nr_bytes;
2208#endif
2209
cdec2e42 2210 struct work_struct work;
26fe6168 2211 unsigned long flags;
a0db00fc 2212#define FLUSHING_CACHED_CHARGE 0
cdec2e42
KH
2213};
2214static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
9f50fad6 2215static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42 2216
bf4f0599
RG
2217#ifdef CONFIG_MEMCG_KMEM
2218static void drain_obj_stock(struct memcg_stock_pcp *stock);
2219static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2220 struct mem_cgroup *root_memcg);
2221
2222#else
2223static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2224{
2225}
2226static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2227 struct mem_cgroup *root_memcg)
2228{
2229 return false;
2230}
2231#endif
2232
a0956d54
SS
2233/**
2234 * consume_stock: Try to consume stocked charge on this cpu.
2235 * @memcg: memcg to consume from.
2236 * @nr_pages: how many pages to charge.
2237 *
2238 * The charges will only happen if @memcg matches the current cpu's memcg
2239 * stock, and at least @nr_pages are available in that stock. Failure to
2240 * service an allocation will refill the stock.
2241 *
2242 * returns true if successful, false otherwise.
cdec2e42 2243 */
a0956d54 2244static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2245{
2246 struct memcg_stock_pcp *stock;
db2ba40c 2247 unsigned long flags;
3e32cb2e 2248 bool ret = false;
cdec2e42 2249
a983b5eb 2250 if (nr_pages > MEMCG_CHARGE_BATCH)
3e32cb2e 2251 return ret;
a0956d54 2252
db2ba40c
JW
2253 local_irq_save(flags);
2254
2255 stock = this_cpu_ptr(&memcg_stock);
3e32cb2e 2256 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
a0956d54 2257 stock->nr_pages -= nr_pages;
3e32cb2e
JW
2258 ret = true;
2259 }
db2ba40c
JW
2260
2261 local_irq_restore(flags);
2262
cdec2e42
KH
2263 return ret;
2264}
2265
2266/*
3e32cb2e 2267 * Drain the stock cached in the percpu area and reset the cached information.
cdec2e42
KH
2268 */
2269static void drain_stock(struct memcg_stock_pcp *stock)
2270{
2271 struct mem_cgroup *old = stock->cached;
2272
1a3e1f40
JW
2273 if (!old)
2274 return;
2275
11c9ea4e 2276 if (stock->nr_pages) {
3e32cb2e 2277 page_counter_uncharge(&old->memory, stock->nr_pages);
7941d214 2278 if (do_memsw_account())
3e32cb2e 2279 page_counter_uncharge(&old->memsw, stock->nr_pages);
11c9ea4e 2280 stock->nr_pages = 0;
cdec2e42 2281 }
1a3e1f40
JW
2282
2283 css_put(&old->css);
cdec2e42 2284 stock->cached = NULL;
cdec2e42
KH
2285}
2286
cdec2e42
KH
2287static void drain_local_stock(struct work_struct *dummy)
2288{
db2ba40c
JW
2289 struct memcg_stock_pcp *stock;
2290 unsigned long flags;
2291
72f0184c
MH
2292 /*
2293 * The only protection from memory hotplug vs. drain_stock races is
2294 * that we always operate on local CPU stock here with IRQ disabled
2295 */
db2ba40c
JW
2296 local_irq_save(flags);
2297
2298 stock = this_cpu_ptr(&memcg_stock);
bf4f0599 2299 drain_obj_stock(stock);
cdec2e42 2300 drain_stock(stock);
26fe6168 2301 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
db2ba40c
JW
2302
2303 local_irq_restore(flags);
cdec2e42
KH
2304}
2305
2306/*
3e32cb2e 2307 * Cache charges (nr_pages) in the local per-cpu area.
320cc51d 2308 * They will be consumed by the consume_stock() function later.
cdec2e42 2309 */
c0ff4b85 2310static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42 2311{
db2ba40c
JW
2312 struct memcg_stock_pcp *stock;
2313 unsigned long flags;
2314
2315 local_irq_save(flags);
cdec2e42 2316
db2ba40c 2317 stock = this_cpu_ptr(&memcg_stock);
c0ff4b85 2318 if (stock->cached != memcg) { /* reset if necessary */
cdec2e42 2319 drain_stock(stock);
1a3e1f40 2320 css_get(&memcg->css);
c0ff4b85 2321 stock->cached = memcg;
cdec2e42 2322 }
11c9ea4e 2323 stock->nr_pages += nr_pages;
db2ba40c 2324
a983b5eb 2325 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
475d0487
RG
2326 drain_stock(stock);
2327
db2ba40c 2328 local_irq_restore(flags);
cdec2e42
KH
2329}
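/*
 * How the stock is used in practice (an illustrative sketch; assumes
 * MEMCG_CHARGE_BATCH is 32 pages): a single-page charge in try_charge()
 * charges a full batch of 32 pages to the page counters and parks the
 * 31-page surplus here via refill_stock().  The next 31 single-page
 * charges from the same memcg on this CPU are then satisfied by
 * consume_stock() without touching the page counters at all.
 */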
2330
2331/*
c0ff4b85 2332 * Drains all per-CPU charge caches for given root_memcg resp. subtree
6d3d6aa2 2333 * of the hierarchy under it.
cdec2e42 2334 */
6d3d6aa2 2335static void drain_all_stock(struct mem_cgroup *root_memcg)
cdec2e42 2336{
26fe6168 2337 int cpu, curcpu;
d38144b7 2338
6d3d6aa2
JW
2339 	/* If someone's already draining, avoid running more workers. */
2340 if (!mutex_trylock(&percpu_charge_mutex))
2341 return;
72f0184c
MH
2342 /*
2343 	 * Notify other cpus that a system-wide "drain" is running.
2344 	 * We do not care about races with cpu hotplug because the cpu-down
2345 	 * path, as well as the workers from this path, always operates on
2346 	 * the local per-cpu data. CPU up doesn't touch memcg_stock at all.
2347 */
5af12d0e 2348 curcpu = get_cpu();
cdec2e42
KH
2349 for_each_online_cpu(cpu) {
2350 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
c0ff4b85 2351 struct mem_cgroup *memcg;
e1a366be 2352 bool flush = false;
26fe6168 2353
e1a366be 2354 rcu_read_lock();
c0ff4b85 2355 memcg = stock->cached;
e1a366be
RG
2356 if (memcg && stock->nr_pages &&
2357 mem_cgroup_is_descendant(memcg, root_memcg))
2358 flush = true;
bf4f0599
RG
2359 if (obj_stock_flush_required(stock, root_memcg))
2360 flush = true;
e1a366be
RG
2361 rcu_read_unlock();
2362
2363 if (flush &&
2364 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
d1a05b69
MH
2365 if (cpu == curcpu)
2366 drain_local_stock(&stock->work);
2367 else
2368 schedule_work_on(cpu, &stock->work);
2369 }
cdec2e42 2370 }
5af12d0e 2371 put_cpu();
9f50fad6 2372 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2373}
2374
308167fc 2375static int memcg_hotplug_cpu_dead(unsigned int cpu)
cdec2e42 2376{
cdec2e42 2377 struct memcg_stock_pcp *stock;
42a30035 2378 struct mem_cgroup *memcg, *mi;
cdec2e42 2379
cdec2e42
KH
2380 stock = &per_cpu(memcg_stock, cpu);
2381 drain_stock(stock);
a983b5eb
JW
2382
2383 for_each_mem_cgroup(memcg) {
2384 int i;
2385
2386 for (i = 0; i < MEMCG_NR_STAT; i++) {
2387 int nid;
2388 long x;
2389
871789d4 2390 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
815744d7 2391 if (x)
42a30035
JW
2392 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2393 atomic_long_add(x, &memcg->vmstats[i]);
a983b5eb
JW
2394
2395 if (i >= NR_VM_NODE_STAT_ITEMS)
2396 continue;
2397
2398 for_each_node(nid) {
2399 struct mem_cgroup_per_node *pn;
2400
2401 pn = mem_cgroup_nodeinfo(memcg, nid);
2402 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
815744d7 2403 if (x)
42a30035
JW
2404 do {
2405 atomic_long_add(x, &pn->lruvec_stat[i]);
2406 } while ((pn = parent_nodeinfo(pn, nid)));
a983b5eb
JW
2407 }
2408 }
2409
e27be240 2410 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
a983b5eb
JW
2411 long x;
2412
871789d4 2413 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
815744d7 2414 if (x)
42a30035
JW
2415 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2416 atomic_long_add(x, &memcg->vmevents[i]);
a983b5eb
JW
2417 }
2418 }
2419
308167fc 2420 return 0;
cdec2e42
KH
2421}
2422
b3ff9291
CD
2423static unsigned long reclaim_high(struct mem_cgroup *memcg,
2424 unsigned int nr_pages,
2425 gfp_t gfp_mask)
f7e1cb6e 2426{
b3ff9291
CD
2427 unsigned long nr_reclaimed = 0;
2428
f7e1cb6e 2429 do {
e22c6ed9
JW
2430 unsigned long pflags;
2431
d1663a90
JK
2432 if (page_counter_read(&memcg->memory) <=
2433 READ_ONCE(memcg->memory.high))
f7e1cb6e 2434 continue;
e22c6ed9 2435
e27be240 2436 memcg_memory_event(memcg, MEMCG_HIGH);
e22c6ed9
JW
2437
2438 psi_memstall_enter(&pflags);
b3ff9291
CD
2439 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2440 gfp_mask, true);
e22c6ed9 2441 psi_memstall_leave(&pflags);
4bf17307
CD
2442 } while ((memcg = parent_mem_cgroup(memcg)) &&
2443 !mem_cgroup_is_root(memcg));
b3ff9291
CD
2444
2445 return nr_reclaimed;
f7e1cb6e
JW
2446}
2447
2448static void high_work_func(struct work_struct *work)
2449{
2450 struct mem_cgroup *memcg;
2451
2452 memcg = container_of(work, struct mem_cgroup, high_work);
a983b5eb 2453 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
f7e1cb6e
JW
2454}
2455
0e4b01df
CD
2456/*
2457 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2458 * enough to cause a significant slowdown in most cases, while still
2459 * allowing diagnostics and tracing to proceed without becoming stuck.
2460 */
2461#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2462
2463/*
2464 * When calculating the delay, we use these either side of the exponentiation to
2465 * maintain precision and scale to a reasonable number of jiffies (see the table
2466 * below).
2467 *
2468 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2469 * overage ratio to a delay.
ac5ddd0f 2470 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
0e4b01df
CD
2471 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2472 * to produce a reasonable delay curve.
2473 *
2474 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2475 * reasonable delay curve compared to precision-adjusted overage, not
2476 * penalising heavily at first, but still making sure that growth beyond the
2477 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2478 * example, with a high of 100 megabytes:
2479 *
2480 * +-------+------------------------+
2481 * | usage | time to allocate in ms |
2482 * +-------+------------------------+
2483 * | 100M | 0 |
2484 * | 101M | 6 |
2485 * | 102M | 25 |
2486 * | 103M | 57 |
2487 * | 104M | 102 |
2488 * | 105M | 159 |
2489 * | 106M | 230 |
2490 * | 107M | 313 |
2491 * | 108M | 409 |
2492 * | 109M | 518 |
2493 * | 110M | 639 |
2494 * | 111M | 774 |
2495 * | 112M | 921 |
2496 * | 113M | 1081 |
2497 * | 114M | 1254 |
2498 * | 115M | 1439 |
2499 * | 116M | 1638 |
2500 * | 117M | 1849 |
2501 * | 118M | 2000 |
2502 * | 119M | 2000 |
2503 * | 120M | 2000 |
2504 * +-------+------------------------+
2505 */
2506 #define MEMCG_DELAY_PRECISION_SHIFT 20
2507 #define MEMCG_DELAY_SCALING_SHIFT 14
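/*
 * Worked example for one row of the table above (illustrative; assumes
 * HZ == 1000): at 110M of usage against a 100M high, the overage is
 * (10M / 100M) << MEMCG_DELAY_PRECISION_SHIFT ~= 104858.  Squaring it,
 * multiplying by HZ and shifting right by (20 + 14) bits gives roughly
 * 640 jiffies, i.e. the ~639 ms entry, before the final scaling by
 * nr_pages / MEMCG_CHARGE_BATCH done in calculate_high_delay().
 */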
2508
8a5dbc65 2509static u64 calculate_overage(unsigned long usage, unsigned long high)
b23afb93 2510{
8a5dbc65 2511 u64 overage;
b23afb93 2512
8a5dbc65
JK
2513 if (usage <= high)
2514 return 0;
e26733e0 2515
8a5dbc65
JK
2516 /*
2517 * Prevent division by 0 in overage calculation by acting as if
2518 * it was a threshold of 1 page
2519 */
2520 high = max(high, 1UL);
9b8b1754 2521
8a5dbc65
JK
2522 overage = usage - high;
2523 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2524 return div64_u64(overage, high);
2525}
e26733e0 2526
8a5dbc65
JK
2527static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2528{
2529 u64 overage, max_overage = 0;
e26733e0 2530
8a5dbc65
JK
2531 do {
2532 overage = calculate_overage(page_counter_read(&memcg->memory),
d1663a90 2533 READ_ONCE(memcg->memory.high));
8a5dbc65 2534 max_overage = max(overage, max_overage);
e26733e0
CD
2535 } while ((memcg = parent_mem_cgroup(memcg)) &&
2536 !mem_cgroup_is_root(memcg));
2537
8a5dbc65
JK
2538 return max_overage;
2539}
2540
4b82ab4f
JK
2541static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2542{
2543 u64 overage, max_overage = 0;
2544
2545 do {
2546 overage = calculate_overage(page_counter_read(&memcg->swap),
2547 READ_ONCE(memcg->swap.high));
2548 if (overage)
2549 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2550 max_overage = max(overage, max_overage);
2551 } while ((memcg = parent_mem_cgroup(memcg)) &&
2552 !mem_cgroup_is_root(memcg));
2553
2554 return max_overage;
2555}
2556
8a5dbc65
JK
2557/*
2558 * Get the number of jiffies by which we should penalise a mischievous cgroup
2559 * that is exceeding its memory.high, checking both it and its ancestors.
2560 */
2561static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2562 unsigned int nr_pages,
2563 u64 max_overage)
2564{
2565 unsigned long penalty_jiffies;
2566
e26733e0
CD
2567 if (!max_overage)
2568 return 0;
0e4b01df
CD
2569
2570 /*
0e4b01df
CD
2571 * We use overage compared to memory.high to calculate the number of
2572 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2573 * fairly lenient on small overages, and increasingly harsh when the
2574 * memcg in question makes it clear that it has no intention of stopping
2575 * its crazy behaviour, so we exponentially increase the delay based on
2576 * overage amount.
2577 */
e26733e0
CD
2578 penalty_jiffies = max_overage * max_overage * HZ;
2579 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2580 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
0e4b01df
CD
2581
2582 /*
2583 * Factor in the task's own contribution to the overage, such that four
2584 * N-sized allocations are throttled approximately the same as one
2585 * 4N-sized allocation.
2586 *
2587 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2588 	 * larger the current charge batch is than that.
2589 */
ff144e69 2590 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
e26733e0
CD
2591}
2592
2593/*
2594 * Scheduled by try_charge() to be executed from the userland return path
2595 * and reclaims memory over the high limit.
2596 */
2597void mem_cgroup_handle_over_high(void)
2598{
2599 unsigned long penalty_jiffies;
2600 unsigned long pflags;
b3ff9291 2601 unsigned long nr_reclaimed;
e26733e0 2602 unsigned int nr_pages = current->memcg_nr_pages_over_high;
d977aa93 2603 int nr_retries = MAX_RECLAIM_RETRIES;
e26733e0 2604 struct mem_cgroup *memcg;
b3ff9291 2605 bool in_retry = false;
e26733e0
CD
2606
2607 if (likely(!nr_pages))
2608 return;
2609
2610 memcg = get_mem_cgroup_from_mm(current->mm);
e26733e0
CD
2611 current->memcg_nr_pages_over_high = 0;
2612
b3ff9291
CD
2613retry_reclaim:
2614 /*
2615 * The allocating task should reclaim at least the batch size, but for
2616 * subsequent retries we only want to do what's necessary to prevent oom
2617 * or breaching resource isolation.
2618 *
2619 * This is distinct from memory.max or page allocator behaviour because
2620 * memory.high is currently batched, whereas memory.max and the page
2621 * allocator run every time an allocation is made.
2622 */
2623 nr_reclaimed = reclaim_high(memcg,
2624 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2625 GFP_KERNEL);
2626
e26733e0
CD
2627 /*
2628 * memory.high is breached and reclaim is unable to keep up. Throttle
2629 * allocators proactively to slow down excessive growth.
2630 */
8a5dbc65
JK
2631 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2632 mem_find_max_overage(memcg));
0e4b01df 2633
4b82ab4f
JK
2634 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2635 swap_find_max_overage(memcg));
2636
ff144e69
JK
2637 /*
2638 * Clamp the max delay per usermode return so as to still keep the
2639 * application moving forwards and also permit diagnostics, albeit
2640 * extremely slowly.
2641 */
2642 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2643
0e4b01df
CD
2644 /*
2645 * Don't sleep if the amount of jiffies this memcg owes us is so low
2646 * that it's not even worth doing, in an attempt to be nice to those who
2647 * go only a small amount over their memory.high value and maybe haven't
2648 * been aggressively reclaimed enough yet.
2649 */
2650 if (penalty_jiffies <= HZ / 100)
2651 goto out;
2652
b3ff9291
CD
2653 /*
2654 * If reclaim is making forward progress but we're still over
2655 * memory.high, we want to encourage that rather than doing allocator
2656 * throttling.
2657 */
2658 if (nr_reclaimed || nr_retries--) {
2659 in_retry = true;
2660 goto retry_reclaim;
2661 }
2662
0e4b01df
CD
2663 /*
2664 * If we exit early, we're guaranteed to die (since
2665 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2666 * need to account for any ill-begotten jiffies to pay them off later.
2667 */
2668 psi_memstall_enter(&pflags);
2669 schedule_timeout_killable(penalty_jiffies);
2670 psi_memstall_leave(&pflags);
2671
2672out:
2673 css_put(&memcg->css);
b23afb93
TH
2674}
2675
00501b53
JW
2676static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2677 unsigned int nr_pages)
8a9f3ccd 2678{
a983b5eb 2679 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
d977aa93 2680 int nr_retries = MAX_RECLAIM_RETRIES;
6539cc05 2681 struct mem_cgroup *mem_over_limit;
3e32cb2e 2682 struct page_counter *counter;
e22c6ed9 2683 enum oom_status oom_status;
6539cc05 2684 unsigned long nr_reclaimed;
b70a2a21
JW
2685 bool may_swap = true;
2686 bool drained = false;
e22c6ed9 2687 unsigned long pflags;
a636b327 2688
ce00a967 2689 if (mem_cgroup_is_root(memcg))
10d53c74 2690 return 0;
6539cc05 2691retry:
b6b6cc72 2692 if (consume_stock(memcg, nr_pages))
10d53c74 2693 return 0;
8a9f3ccd 2694
7941d214 2695 if (!do_memsw_account() ||
6071ca52
JW
2696 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2697 if (page_counter_try_charge(&memcg->memory, batch, &counter))
6539cc05 2698 goto done_restock;
7941d214 2699 if (do_memsw_account())
3e32cb2e
JW
2700 page_counter_uncharge(&memcg->memsw, batch);
2701 mem_over_limit = mem_cgroup_from_counter(counter, memory);
3fbe7244 2702 } else {
3e32cb2e 2703 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
b70a2a21 2704 may_swap = false;
3fbe7244 2705 }
7a81b88c 2706
6539cc05
JW
2707 if (batch > nr_pages) {
2708 batch = nr_pages;
2709 goto retry;
2710 }
6d61ef40 2711
869712fd
JW
2712 /*
2713 * Memcg doesn't have a dedicated reserve for atomic
2714 * allocations. But like the global atomic pool, we need to
2715 * put the burden of reclaim on regular allocation requests
2716 * and let these go through as privileged allocations.
2717 */
2718 if (gfp_mask & __GFP_ATOMIC)
2719 goto force;
2720
06b078fc
JW
2721 /*
2722 * Unlike in global OOM situations, memcg is not in a physical
2723 * memory shortage. Allow dying and OOM-killed tasks to
2724 * bypass the last charges so that they can exit quickly and
2725 * free their memory.
2726 */
7775face 2727 if (unlikely(should_force_charge()))
10d53c74 2728 goto force;
06b078fc 2729
89a28483
JW
2730 /*
2731 * Prevent unbounded recursion when reclaim operations need to
2732 * allocate memory. This might exceed the limits temporarily,
2733 * but we prefer facilitating memory reclaim and getting back
2734 * under the limit over triggering OOM kills in these cases.
2735 */
2736 if (unlikely(current->flags & PF_MEMALLOC))
2737 goto force;
2738
06b078fc
JW
2739 if (unlikely(task_in_memcg_oom(current)))
2740 goto nomem;
2741
d0164adc 2742 if (!gfpflags_allow_blocking(gfp_mask))
6539cc05 2743 goto nomem;
4b534334 2744
e27be240 2745 memcg_memory_event(mem_over_limit, MEMCG_MAX);
241994ed 2746
e22c6ed9 2747 psi_memstall_enter(&pflags);
b70a2a21
JW
2748 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2749 gfp_mask, may_swap);
e22c6ed9 2750 psi_memstall_leave(&pflags);
6539cc05 2751
61e02c74 2752 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
6539cc05 2753 goto retry;
28c34c29 2754
b70a2a21 2755 if (!drained) {
6d3d6aa2 2756 drain_all_stock(mem_over_limit);
b70a2a21
JW
2757 drained = true;
2758 goto retry;
2759 }
2760
28c34c29
JW
2761 if (gfp_mask & __GFP_NORETRY)
2762 goto nomem;
6539cc05
JW
2763 /*
2764 * Even though the limit is exceeded at this point, reclaim
2765 * may have been able to free some pages. Retry the charge
2766 * before killing the task.
2767 *
2768 * Only for regular pages, though: huge pages are rather
2769 * unlikely to succeed so close to the limit, and we fall back
2770 * to regular pages anyway in case of failure.
2771 */
61e02c74 2772 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
6539cc05
JW
2773 goto retry;
2774 /*
2775 	 * During task move, charges can be double-counted. So it's
2776 * better to wait until the end of task_move if something is going on.
2777 */
2778 if (mem_cgroup_wait_acct_move(mem_over_limit))
2779 goto retry;
2780
9b130619
JW
2781 if (nr_retries--)
2782 goto retry;
2783
38d38493 2784 if (gfp_mask & __GFP_RETRY_MAYFAIL)
29ef680a
MH
2785 goto nomem;
2786
06b078fc 2787 if (gfp_mask & __GFP_NOFAIL)
10d53c74 2788 goto force;
06b078fc 2789
6539cc05 2790 if (fatal_signal_pending(current))
10d53c74 2791 goto force;
6539cc05 2792
29ef680a
MH
2793 /*
2794 * keep retrying as long as the memcg oom killer is able to make
2795 * a forward progress or bypass the charge if the oom killer
2796 * couldn't make any progress.
2797 */
2798 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
3608de07 2799 get_order(nr_pages * PAGE_SIZE));
29ef680a
MH
2800 switch (oom_status) {
2801 case OOM_SUCCESS:
d977aa93 2802 nr_retries = MAX_RECLAIM_RETRIES;
29ef680a
MH
2803 goto retry;
2804 case OOM_FAILED:
2805 goto force;
2806 default:
2807 goto nomem;
2808 }
7a81b88c 2809nomem:
6d1fdc48 2810 if (!(gfp_mask & __GFP_NOFAIL))
3168ecbe 2811 return -ENOMEM;
10d53c74
TH
2812force:
2813 /*
2814 * The allocation either can't fail or will lead to more memory
2815 	 * being freed very soon. Allow memory usage to go over the limit
2816 * temporarily by force charging it.
2817 */
2818 page_counter_charge(&memcg->memory, nr_pages);
7941d214 2819 if (do_memsw_account())
10d53c74 2820 page_counter_charge(&memcg->memsw, nr_pages);
10d53c74
TH
2821
2822 return 0;
6539cc05
JW
2823
2824done_restock:
2825 if (batch > nr_pages)
2826 refill_stock(memcg, batch - nr_pages);
b23afb93 2827
241994ed 2828 /*
b23afb93
TH
2829 * If the hierarchy is above the normal consumption range, schedule
2830 * reclaim on returning to userland. We can perform reclaim here
71baba4b 2831 * if __GFP_RECLAIM but let's always punt for simplicity and so that
b23afb93
TH
2832 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2833 * not recorded as it most likely matches current's and won't
2834 * change in the meantime. As high limit is checked again before
2835 * reclaim, the cost of mismatch is negligible.
241994ed
JW
2836 */
2837 do {
4b82ab4f
JK
2838 bool mem_high, swap_high;
2839
2840 mem_high = page_counter_read(&memcg->memory) >
2841 READ_ONCE(memcg->memory.high);
2842 swap_high = page_counter_read(&memcg->swap) >
2843 READ_ONCE(memcg->swap.high);
2844
2845 /* Don't bother a random interrupted task */
2846 if (in_interrupt()) {
2847 if (mem_high) {
f7e1cb6e
JW
2848 schedule_work(&memcg->high_work);
2849 break;
2850 }
4b82ab4f
JK
2851 continue;
2852 }
2853
2854 if (mem_high || swap_high) {
2855 /*
2856 * The allocating tasks in this cgroup will need to do
2857 * reclaim or be throttled to prevent further growth
2858 * of the memory or swap footprints.
2859 *
2860 * Target some best-effort fairness between the tasks,
2861 * and distribute reclaim work and delay penalties
2862 * based on how much each task is actually allocating.
2863 */
9516a18a 2864 current->memcg_nr_pages_over_high += batch;
b23afb93
TH
2865 set_notify_resume(current);
2866 break;
2867 }
241994ed 2868 } while ((memcg = parent_mem_cgroup(memcg)));
10d53c74
TH
2869
2870 return 0;
7a81b88c 2871}
8a9f3ccd 2872
f0e45fb4 2873#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
00501b53 2874static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
a3032a2c 2875{
ce00a967
JW
2876 if (mem_cgroup_is_root(memcg))
2877 return;
2878
3e32cb2e 2879 page_counter_uncharge(&memcg->memory, nr_pages);
7941d214 2880 if (do_memsw_account())
3e32cb2e 2881 page_counter_uncharge(&memcg->memsw, nr_pages);
d01dd17f 2882}
f0e45fb4 2883#endif
d01dd17f 2884
d9eb1ea2 2885static void commit_charge(struct page *page, struct mem_cgroup *memcg)
0a31bc97 2886{
1306a85a 2887 VM_BUG_ON_PAGE(page->mem_cgroup, page);
0a31bc97 2888 /*
a0b5b414 2889 * Any of the following ensures page->mem_cgroup stability:
0a31bc97 2890 *
a0b5b414
JW
2891 * - the page lock
2892 * - LRU isolation
2893 * - lock_page_memcg()
2894 * - exclusive reference
0a31bc97 2895 */
1306a85a 2896 page->mem_cgroup = memcg;
7a81b88c 2897}
66e1707b 2898
84c07d11 2899#ifdef CONFIG_MEMCG_KMEM
10befea9
RG
2900int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2901 gfp_t gfp)
2902{
2903 unsigned int objects = objs_per_slab_page(s, page);
2904 void *vec;
2905
2906 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2907 page_to_nid(page));
2908 if (!vec)
2909 return -ENOMEM;
2910
2911 if (cmpxchg(&page->obj_cgroups, NULL,
2912 (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
2913 kfree(vec);
2914 else
2915 kmemleak_not_leak(vec);
2916
2917 return 0;
2918}
2919
8380ce47
RG
2920/*
2921 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2922 *
2923 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2924 * cgroup_mutex, etc.
2925 */
2926struct mem_cgroup *mem_cgroup_from_obj(void *p)
2927{
2928 struct page *page;
2929
2930 if (mem_cgroup_disabled())
2931 return NULL;
2932
2933 page = virt_to_head_page(p);
2934
19b629c9
RG
2935 /*
2936 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
2937 * or a pointer to obj_cgroup vector. In the latter case the lowest
2938 * bit of the pointer is set.
2939 * The page->mem_cgroup pointer can be asynchronously changed
2940 * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed
2941 * from a valid memcg pointer to objcg vector or back.
2942 */
2943 if (!page->mem_cgroup)
2944 return NULL;
2945
8380ce47 2946 /*
9855609b
RG
2947 * Slab objects are accounted individually, not per-page.
2948 * Memcg membership data for each individual object is saved in
2949 * the page->obj_cgroups.
8380ce47 2950 */
9855609b
RG
2951 if (page_has_obj_cgroups(page)) {
2952 struct obj_cgroup *objcg;
2953 unsigned int off;
2954
2955 off = obj_to_index(page->slab_cache, page, p);
2956 objcg = page_obj_cgroups(page)[off];
10befea9
RG
2957 if (objcg)
2958 return obj_cgroup_memcg(objcg);
2959
2960 return NULL;
9855609b 2961 }
8380ce47
RG
2962
2963 /* All other pages use page->mem_cgroup */
2964 return page->mem_cgroup;
2965}
2966
bf4f0599
RG
2967__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2968{
2969 struct obj_cgroup *objcg = NULL;
2970 struct mem_cgroup *memcg;
2971
279c3393
RG
2972 if (memcg_kmem_bypass())
2973 return NULL;
2974
bf4f0599 2975 rcu_read_lock();
37d5985c
RG
2976 if (unlikely(active_memcg()))
2977 memcg = active_memcg();
bf4f0599
RG
2978 else
2979 memcg = mem_cgroup_from_task(current);
2980
2981 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2982 objcg = rcu_dereference(memcg->objcg);
2983 if (objcg && obj_cgroup_tryget(objcg))
2984 break;
2985 }
2986 rcu_read_unlock();
2987
2988 return objcg;
2989}
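/*
 * Rough sketch of how the slab allocator is expected to use this pair
 * (the authoritative hooks live in mm/slab.h; the details below are
 * illustrative only):
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg && obj_cgroup_charge(objcg, flags, obj_size)) {
 *		obj_cgroup_put(objcg);		// charge failed
 *		objcg = NULL;
 *	}
 *	... on success the objcg pointer is recorded in page->obj_cgroups
 *	    and obj_cgroup_uncharge() is called when the object is freed ...
 */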
2990
f3bb3043 2991static int memcg_alloc_cache_id(void)
55007d84 2992{
f3bb3043
VD
2993 int id, size;
2994 int err;
2995
dbcf73e2 2996 id = ida_simple_get(&memcg_cache_ida,
f3bb3043
VD
2997 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2998 if (id < 0)
2999 return id;
55007d84 3000
dbcf73e2 3001 if (id < memcg_nr_cache_ids)
f3bb3043
VD
3002 return id;
3003
3004 /*
3005 * There's no space for the new id in memcg_caches arrays,
3006 * so we have to grow them.
3007 */
05257a1a 3008 down_write(&memcg_cache_ids_sem);
f3bb3043
VD
3009
3010 size = 2 * (id + 1);
55007d84
GC
3011 if (size < MEMCG_CACHES_MIN_SIZE)
3012 size = MEMCG_CACHES_MIN_SIZE;
3013 else if (size > MEMCG_CACHES_MAX_SIZE)
3014 size = MEMCG_CACHES_MAX_SIZE;
3015
9855609b 3016 err = memcg_update_all_list_lrus(size);
05257a1a
VD
3017 if (!err)
3018 memcg_nr_cache_ids = size;
3019
3020 up_write(&memcg_cache_ids_sem);
3021
f3bb3043 3022 if (err) {
dbcf73e2 3023 ida_simple_remove(&memcg_cache_ida, id);
f3bb3043
VD
3024 return err;
3025 }
3026 return id;
3027}
3028
3029static void memcg_free_cache_id(int id)
3030{
dbcf73e2 3031 ida_simple_remove(&memcg_cache_ida, id);
55007d84
GC
3032}
3033
45264778 3034/**
4b13f64d 3035 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
10eaec2f 3036 * @memcg: memory cgroup to charge
45264778 3037 * @gfp: reclaim mode
92d0510c 3038 * @nr_pages: number of pages to charge
45264778
VD
3039 *
3040 * Returns 0 on success, an error code on failure.
3041 */
4b13f64d
RG
3042int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
3043 unsigned int nr_pages)
7ae1e1d0 3044{
f3ccb2c4 3045 struct page_counter *counter;
7ae1e1d0
GC
3046 int ret;
3047
f3ccb2c4 3048 ret = try_charge(memcg, gfp, nr_pages);
52c29b04 3049 if (ret)
f3ccb2c4 3050 return ret;
52c29b04
JW
3051
3052 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3053 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
e55d9d9b
MH
3054
3055 /*
3056 * Enforce __GFP_NOFAIL allocation because callers are not
3057 * prepared to see failures and likely do not have any failure
3058 * handling code.
3059 */
3060 if (gfp & __GFP_NOFAIL) {
3061 page_counter_charge(&memcg->kmem, nr_pages);
3062 return 0;
3063 }
52c29b04
JW
3064 cancel_charge(memcg, nr_pages);
3065 return -ENOMEM;
7ae1e1d0 3066 }
f3ccb2c4 3067 return 0;
7ae1e1d0
GC
3068}
3069
4b13f64d
RG
3070/**
3071 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
3072 * @memcg: memcg to uncharge
3073 * @nr_pages: number of pages to uncharge
3074 */
3075void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3076{
3077 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3078 page_counter_uncharge(&memcg->kmem, nr_pages);
3079
3080 page_counter_uncharge(&memcg->memory, nr_pages);
3081 if (do_memsw_account())
3082 page_counter_uncharge(&memcg->memsw, nr_pages);
3083}
3084
45264778 3085/**
f4b00eab 3086 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
45264778
VD
3087 * @page: page to charge
3088 * @gfp: reclaim mode
3089 * @order: allocation order
3090 *
3091 * Returns 0 on success, an error code on failure.
3092 */
f4b00eab 3093int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
7ae1e1d0 3094{
f3ccb2c4 3095 struct mem_cgroup *memcg;
fcff7d7e 3096 int ret = 0;
7ae1e1d0 3097
d46eb14b 3098 memcg = get_mem_cgroup_from_current();
279c3393 3099 if (memcg && !mem_cgroup_is_root(memcg)) {
4b13f64d 3100 ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
4d96ba35
RG
3101 if (!ret) {
3102 page->mem_cgroup = memcg;
c4159a75 3103 __SetPageKmemcg(page);
1a3e1f40 3104 return 0;
4d96ba35 3105 }
279c3393 3106 css_put(&memcg->css);
c4159a75 3107 }
d05e83a6 3108 return ret;
7ae1e1d0 3109}
49a18eae 3110
45264778 3111/**
f4b00eab 3112 * __memcg_kmem_uncharge_page: uncharge a kmem page
45264778
VD
3113 * @page: page to uncharge
3114 * @order: allocation order
3115 */
f4b00eab 3116void __memcg_kmem_uncharge_page(struct page *page, int order)
7ae1e1d0 3117{
1306a85a 3118 struct mem_cgroup *memcg = page->mem_cgroup;
f3ccb2c4 3119 unsigned int nr_pages = 1 << order;
7ae1e1d0 3120
7ae1e1d0
GC
3121 if (!memcg)
3122 return;
3123
309381fe 3124 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
4b13f64d 3125 __memcg_kmem_uncharge(memcg, nr_pages);
1306a85a 3126 page->mem_cgroup = NULL;
1a3e1f40 3127 css_put(&memcg->css);
c4159a75
VD
3128
3129 /* slab pages do not have PageKmemcg flag set */
3130 if (PageKmemcg(page))
3131 __ClearPageKmemcg(page);
60d3fd32 3132}
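/*
 * These two hooks are driven from the page allocator (a simplified
 * sketch; the authoritative call sites live in mm/page_alloc.c):
 * allocations carrying __GFP_ACCOUNT are charged with
 * __memcg_kmem_charge_page() right after the page is allocated, and the
 * PageKmemcg flag set above is what routes the page back through
 * __memcg_kmem_uncharge_page() when it is finally freed.
 */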
bf4f0599
RG
3133
3134static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3135{
3136 struct memcg_stock_pcp *stock;
3137 unsigned long flags;
3138 bool ret = false;
3139
3140 local_irq_save(flags);
3141
3142 stock = this_cpu_ptr(&memcg_stock);
3143 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3144 stock->nr_bytes -= nr_bytes;
3145 ret = true;
3146 }
3147
3148 local_irq_restore(flags);
3149
3150 return ret;
3151}
3152
3153static void drain_obj_stock(struct memcg_stock_pcp *stock)
3154{
3155 struct obj_cgroup *old = stock->cached_objcg;
3156
3157 if (!old)
3158 return;
3159
3160 if (stock->nr_bytes) {
3161 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3162 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3163
3164 if (nr_pages) {
3165 rcu_read_lock();
3166 __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
3167 rcu_read_unlock();
3168 }
3169
3170 /*
3171 * The leftover is flushed to the centralized per-memcg value.
3172 * On the next attempt to refill obj stock it will be moved
3173 * to a per-cpu stock (probably, on an other CPU), see
3174 * refill_obj_stock().
3175 *
3176 * How often it's flushed is a trade-off between the memory
3177 * limit enforcement accuracy and potential CPU contention,
3178 * so it might be changed in the future.
3179 */
3180 atomic_add(nr_bytes, &old->nr_charged_bytes);
3181 stock->nr_bytes = 0;
3182 }
3183
3184 obj_cgroup_put(old);
3185 stock->cached_objcg = NULL;
3186}
3187
3188static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3189 struct mem_cgroup *root_memcg)
3190{
3191 struct mem_cgroup *memcg;
3192
3193 if (stock->cached_objcg) {
3194 memcg = obj_cgroup_memcg(stock->cached_objcg);
3195 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3196 return true;
3197 }
3198
3199 return false;
3200}
3201
3202static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3203{
3204 struct memcg_stock_pcp *stock;
3205 unsigned long flags;
3206
3207 local_irq_save(flags);
3208
3209 stock = this_cpu_ptr(&memcg_stock);
3210 if (stock->cached_objcg != objcg) { /* reset if necessary */
3211 drain_obj_stock(stock);
3212 obj_cgroup_get(objcg);
3213 stock->cached_objcg = objcg;
3214 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3215 }
3216 stock->nr_bytes += nr_bytes;
3217
3218 if (stock->nr_bytes > PAGE_SIZE)
3219 drain_obj_stock(stock);
3220
3221 local_irq_restore(flags);
3222}
3223
3224int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3225{
3226 struct mem_cgroup *memcg;
3227 unsigned int nr_pages, nr_bytes;
3228 int ret;
3229
3230 if (consume_obj_stock(objcg, size))
3231 return 0;
3232
3233 /*
3234 	 * In theory, objcg->nr_charged_bytes can have enough
3235 	 * pre-charged bytes to satisfy the allocation. However,
3236 	 * flushing objcg->nr_charged_bytes requires two atomic
3237 	 * operations, and objcg->nr_charged_bytes can't be big,
3238 	 * so it's better to ignore it and try to grab some new pages.
3239 	 * objcg->nr_charged_bytes will be flushed in
3240 * refill_obj_stock(), called from this function or
3241 * independently later.
3242 */
3243 rcu_read_lock();
3244 memcg = obj_cgroup_memcg(objcg);
3245 css_get(&memcg->css);
3246 rcu_read_unlock();
3247
3248 nr_pages = size >> PAGE_SHIFT;
3249 nr_bytes = size & (PAGE_SIZE - 1);
3250
3251 if (nr_bytes)
3252 nr_pages += 1;
3253
3254 ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
3255 if (!ret && nr_bytes)
3256 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
3257
3258 css_put(&memcg->css);
3259 return ret;
3260}
3261
3262void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3263{
3264 refill_obj_stock(objcg, size);
3265}
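/*
 * Worked example (illustrative): charging a 600-byte object while the
 * local obj stock is empty rounds the request up to one full page, so
 * __memcg_kmem_charge() charges one page to the memcg and the remaining
 * PAGE_SIZE - 600 bytes are handed to refill_obj_stock().  Subsequent
 * small charges on this CPU are then served byte-by-byte from the
 * per-cpu stock via consume_obj_stock() until it runs dry.
 */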
3266
84c07d11 3267#endif /* CONFIG_MEMCG_KMEM */
7ae1e1d0 3268
ca3e0214
KH
3269#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3270
ca3e0214
KH
3271/*
3272 * Because tail pages are not marked as "used", set it. We're under
f4b7e272 3273 * pgdat->lru_lock and migration entries are set up in all page mappings.
ca3e0214 3274 */
e94c8a9c 3275void mem_cgroup_split_huge_fixup(struct page *head)
ca3e0214 3276{
1a3e1f40 3277 struct mem_cgroup *memcg = head->mem_cgroup;
e94c8a9c 3278 int i;
ca3e0214 3279
3d37c4a9
KH
3280 if (mem_cgroup_disabled())
3281 return;
b070e65c 3282
1a3e1f40
JW
3283 for (i = 1; i < HPAGE_PMD_NR; i++) {
3284 css_get(&memcg->css);
3285 head[i].mem_cgroup = memcg;
3286 }
ca3e0214 3287}
12d27107 3288#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
ca3e0214 3289
c255a458 3290#ifdef CONFIG_MEMCG_SWAP
02491447
DN
3291/**
3292 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3293 * @entry: swap entry to be moved
3294 * @from: mem_cgroup which the entry is moved from
3295 * @to: mem_cgroup which the entry is moved to
3296 *
3297 * It succeeds only when the swap_cgroup's record for this entry is the same
3298 * as the mem_cgroup's id of @from.
3299 *
3300 * Returns 0 on success, -EINVAL on failure.
3301 *
3e32cb2e 3302 * The caller must have charged to @to, IOW, called page_counter_charge() about
02491447
DN
3303 * both res and memsw, and called css_get().
3304 */
3305static int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3306 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3307{
3308 unsigned short old_id, new_id;
3309
34c00c31
LZ
3310 old_id = mem_cgroup_id(from);
3311 new_id = mem_cgroup_id(to);
02491447
DN
3312
3313 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
c9019e9b
JW
3314 mod_memcg_state(from, MEMCG_SWAP, -1);
3315 mod_memcg_state(to, MEMCG_SWAP, 1);
02491447
DN
3316 return 0;
3317 }
3318 return -EINVAL;
3319}
3320#else
3321static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3322 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3323{
3324 return -EINVAL;
3325}
8c7c6e34 3326#endif
d13d1443 3327
bbec2e15 3328static DEFINE_MUTEX(memcg_max_mutex);
f212ad7c 3329
bbec2e15
RG
3330static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3331 unsigned long max, bool memsw)
628f4235 3332{
3e32cb2e 3333 bool enlarge = false;
bb4a7ea2 3334 bool drained = false;
3e32cb2e 3335 int ret;
c054a78c
YZ
3336 bool limits_invariant;
3337 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
81d39c20 3338
3e32cb2e 3339 do {
628f4235
KH
3340 if (signal_pending(current)) {
3341 ret = -EINTR;
3342 break;
3343 }
3e32cb2e 3344
bbec2e15 3345 mutex_lock(&memcg_max_mutex);
c054a78c
YZ
3346 /*
3347 * Make sure that the new limit (memsw or memory limit) doesn't
bbec2e15 3348 * break our basic invariant rule memory.max <= memsw.max.
c054a78c 3349 */
15b42562 3350 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
bbec2e15 3351 max <= memcg->memsw.max;
c054a78c 3352 if (!limits_invariant) {
bbec2e15 3353 mutex_unlock(&memcg_max_mutex);
8c7c6e34 3354 ret = -EINVAL;
8c7c6e34
KH
3355 break;
3356 }
bbec2e15 3357 if (max > counter->max)
3e32cb2e 3358 enlarge = true;
bbec2e15
RG
3359 ret = page_counter_set_max(counter, max);
3360 mutex_unlock(&memcg_max_mutex);
8c7c6e34
KH
3361
3362 if (!ret)
3363 break;
3364
bb4a7ea2
SB
3365 if (!drained) {
3366 drain_all_stock(memcg);
3367 drained = true;
3368 continue;
3369 }
3370
1ab5c056
AR
3371 if (!try_to_free_mem_cgroup_pages(memcg, 1,
3372 GFP_KERNEL, !memsw)) {
3373 ret = -EBUSY;
3374 break;
3375 }
3376 } while (true);
3e32cb2e 3377
3c11ecf4
KH
3378 if (!ret && enlarge)
3379 memcg_oom_recover(memcg);
3e32cb2e 3380
628f4235
KH
3381 return ret;
3382}
3383
ef8f2327 3384unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
0608f43d
AM
3385 gfp_t gfp_mask,
3386 unsigned long *total_scanned)
3387{
3388 unsigned long nr_reclaimed = 0;
ef8f2327 3389 struct mem_cgroup_per_node *mz, *next_mz = NULL;
0608f43d
AM
3390 unsigned long reclaimed;
3391 int loop = 0;
ef8f2327 3392 struct mem_cgroup_tree_per_node *mctz;
3e32cb2e 3393 unsigned long excess;
0608f43d
AM
3394 unsigned long nr_scanned;
3395
3396 if (order > 0)
3397 return 0;
3398
ef8f2327 3399 mctz = soft_limit_tree_node(pgdat->node_id);
d6507ff5
MH
3400
3401 /*
3402 * Do not even bother to check the largest node if the root
3403 * is empty. Do it lockless to prevent lock bouncing. Races
3404 * are acceptable as soft limit is best effort anyway.
3405 */
bfc7228b 3406 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
d6507ff5
MH
3407 return 0;
3408
0608f43d
AM
3409 /*
3410 	 * This loop can run for a while, especially if mem_cgroups continuously
3411 * keep exceeding their soft limit and putting the system under
3412 * pressure
3413 */
3414 do {
3415 if (next_mz)
3416 mz = next_mz;
3417 else
3418 mz = mem_cgroup_largest_soft_limit_node(mctz);
3419 if (!mz)
3420 break;
3421
3422 nr_scanned = 0;
ef8f2327 3423 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
0608f43d
AM
3424 gfp_mask, &nr_scanned);
3425 nr_reclaimed += reclaimed;
3426 *total_scanned += nr_scanned;
0a31bc97 3427 spin_lock_irq(&mctz->lock);
bc2f2e7f 3428 __mem_cgroup_remove_exceeded(mz, mctz);
0608f43d
AM
3429
3430 /*
3431 * If we failed to reclaim anything from this memory cgroup
3432 * it is time to move on to the next cgroup
3433 */
3434 next_mz = NULL;
bc2f2e7f
VD
3435 if (!reclaimed)
3436 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3437
3e32cb2e 3438 excess = soft_limit_excess(mz->memcg);
0608f43d
AM
3439 /*
3440 * One school of thought says that we should not add
3441 * back the node to the tree if reclaim returns 0.
3442 		 * But our reclaim could return 0, simply because, due
3443 		 * to priority, we are exposing a smaller subset of
3444 * memory to reclaim from. Consider this as a longer
3445 * term TODO.
3446 */
3447 /* If excess == 0, no tree ops */
cf2c8127 3448 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 3449 spin_unlock_irq(&mctz->lock);
0608f43d
AM
3450 css_put(&mz->memcg->css);
3451 loop++;
3452 /*
3453 * Could not reclaim anything and there are no more
3454 * mem cgroups to try or we seem to be looping without
3455 * reclaiming anything.
3456 */
3457 if (!nr_reclaimed &&
3458 (next_mz == NULL ||
3459 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3460 break;
3461 } while (!nr_reclaimed);
3462 if (next_mz)
3463 css_put(&next_mz->memcg->css);
3464 return nr_reclaimed;
3465}
3466
ea280e7b
TH
3467/*
3468 * Test whether @memcg has children, dead or alive. Note that this
3469 * function doesn't care whether @memcg has use_hierarchy enabled and
3470 * returns %true if there are child csses according to the cgroup
b8f2935f 3471 * hierarchy. Testing use_hierarchy is the caller's responsibility.
ea280e7b 3472 */
b5f99b53
GC
3473static inline bool memcg_has_children(struct mem_cgroup *memcg)
3474{
ea280e7b
TH
3475 bool ret;
3476
ea280e7b
TH
3477 rcu_read_lock();
3478 ret = css_next_child(NULL, &memcg->css);
3479 rcu_read_unlock();
3480 return ret;
b5f99b53
GC
3481}
3482
c26251f9 3483/*
51038171 3484 * Reclaims as many pages from the given memcg as possible.
c26251f9
MH
3485 *
3486 * Caller is responsible for holding css reference for memcg.
3487 */
3488static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3489{
d977aa93 3490 int nr_retries = MAX_RECLAIM_RETRIES;
c26251f9 3491
c1e862c1
KH
3492 /* we call try-to-free pages to make this cgroup empty */
3493 lru_add_drain_all();
d12c60f6
JS
3494
3495 drain_all_stock(memcg);
3496
f817ed48 3497 /* try to free all pages in this cgroup */
3e32cb2e 3498 while (nr_retries && page_counter_read(&memcg->memory)) {
f817ed48 3499 int progress;
c1e862c1 3500
c26251f9
MH
3501 if (signal_pending(current))
3502 return -EINTR;
3503
b70a2a21
JW
3504 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3505 GFP_KERNEL, true);
c1e862c1 3506 if (!progress) {
f817ed48 3507 nr_retries--;
c1e862c1 3508 /* maybe some writeback is necessary */
8aa7e847 3509 congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1 3510 }
f817ed48
KH
3511
3512 }
ab5196c2
MH
3513
3514 return 0;
cc847582
KH
3515}
3516
6770c64e
TH
3517static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3518 char *buf, size_t nbytes,
3519 loff_t off)
c1e862c1 3520{
6770c64e 3521 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
c26251f9 3522
d8423011
MH
3523 if (mem_cgroup_is_root(memcg))
3524 return -EINVAL;
6770c64e 3525 return mem_cgroup_force_empty(memcg) ?: nbytes;
c1e862c1
KH
3526}
3527
182446d0
TH
3528static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3529 struct cftype *cft)
18f59ea7 3530{
182446d0 3531 return mem_cgroup_from_css(css)->use_hierarchy;
18f59ea7
BS
3532}
3533
182446d0
TH
3534static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3535 struct cftype *cft, u64 val)
18f59ea7
BS
3536{
3537 int retval = 0;
182446d0 3538 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5c9d535b 3539 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
18f59ea7 3540
567fb435 3541 if (memcg->use_hierarchy == val)
0b8f73e1 3542 return 0;
567fb435 3543
18f59ea7 3544 /*
af901ca1 3545 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7
BS
3546 * in the child subtrees. If it is unset, then the change can
3547 * occur, provided the current cgroup has no children.
3548 *
3549 * For the root cgroup, parent_memcg is NULL, so we allow the value to
3550 * be set if there are no children.
3551 */
c0ff4b85 3552 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
18f59ea7 3553 (val == 1 || val == 0)) {
ea280e7b 3554 if (!memcg_has_children(memcg))
c0ff4b85 3555 memcg->use_hierarchy = val;
18f59ea7
BS
3556 else
3557 retval = -EBUSY;
3558 } else
3559 retval = -EINVAL;
567fb435 3560
18f59ea7
BS
3561 return retval;
3562}
3563
6f646156 3564static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
ce00a967 3565{
42a30035 3566 unsigned long val;
ce00a967 3567
3e32cb2e 3568 if (mem_cgroup_is_root(memcg)) {
0d1c2072 3569 val = memcg_page_state(memcg, NR_FILE_PAGES) +
be5d0a74 3570 memcg_page_state(memcg, NR_ANON_MAPPED);
42a30035
JW
3571 if (swap)
3572 val += memcg_page_state(memcg, MEMCG_SWAP);
3e32cb2e 3573 } else {
ce00a967 3574 if (!swap)
3e32cb2e 3575 val = page_counter_read(&memcg->memory);
ce00a967 3576 else
3e32cb2e 3577 val = page_counter_read(&memcg->memsw);
ce00a967 3578 }
c12176d3 3579 return val;
ce00a967
JW
3580}
3581
3e32cb2e
JW
3582enum {
3583 RES_USAGE,
3584 RES_LIMIT,
3585 RES_MAX_USAGE,
3586 RES_FAILCNT,
3587 RES_SOFT_LIMIT,
3588};
ce00a967 3589
791badbd 3590static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
05b84301 3591 struct cftype *cft)
8cdea7c0 3592{
182446d0 3593 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3e32cb2e 3594 struct page_counter *counter;
af36f906 3595
3e32cb2e 3596 switch (MEMFILE_TYPE(cft->private)) {
8c7c6e34 3597 case _MEM:
3e32cb2e
JW
3598 counter = &memcg->memory;
3599 break;
8c7c6e34 3600 case _MEMSWAP:
3e32cb2e
JW
3601 counter = &memcg->memsw;
3602 break;
510fc4e1 3603 case _KMEM:
3e32cb2e 3604 counter = &memcg->kmem;
510fc4e1 3605 break;
d55f90bf 3606 case _TCP:
0db15298 3607 counter = &memcg->tcpmem;
d55f90bf 3608 break;
8c7c6e34
KH
3609 default:
3610 BUG();
8c7c6e34 3611 }
3e32cb2e
JW
3612
3613 switch (MEMFILE_ATTR(cft->private)) {
3614 case RES_USAGE:
3615 if (counter == &memcg->memory)
c12176d3 3616 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3e32cb2e 3617 if (counter == &memcg->memsw)
c12176d3 3618 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3e32cb2e
JW
3619 return (u64)page_counter_read(counter) * PAGE_SIZE;
3620 case RES_LIMIT:
bbec2e15 3621 return (u64)counter->max * PAGE_SIZE;
3e32cb2e
JW
3622 case RES_MAX_USAGE:
3623 return (u64)counter->watermark * PAGE_SIZE;
3624 case RES_FAILCNT:
3625 return counter->failcnt;
3626 case RES_SOFT_LIMIT:
3627 return (u64)memcg->soft_limit * PAGE_SIZE;
3628 default:
3629 BUG();
3630 }
8cdea7c0 3631}
510fc4e1 3632
4a87e2a2 3633static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
c350a99e 3634{
4a87e2a2 3635 unsigned long stat[MEMCG_NR_STAT] = {0};
c350a99e
RG
3636 struct mem_cgroup *mi;
3637 int node, cpu, i;
c350a99e
RG
3638
3639 for_each_online_cpu(cpu)
4a87e2a2 3640 for (i = 0; i < MEMCG_NR_STAT; i++)
6c1c2808 3641 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
c350a99e
RG
3642
3643 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
4a87e2a2 3644 for (i = 0; i < MEMCG_NR_STAT; i++)
c350a99e
RG
3645 atomic_long_add(stat[i], &mi->vmstats[i]);
3646
3647 for_each_node(node) {
3648 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3649 struct mem_cgroup_per_node *pi;
3650
4a87e2a2 3651 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
c350a99e
RG
3652 stat[i] = 0;
3653
3654 for_each_online_cpu(cpu)
4a87e2a2 3655 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
6c1c2808
SB
3656 stat[i] += per_cpu(
3657 pn->lruvec_stat_cpu->count[i], cpu);
c350a99e
RG
3658
3659 for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
4a87e2a2 3660 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
c350a99e
RG
3661 atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3662 }
3663}
3664
bb65f89b
RG
3665static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3666{
3667 unsigned long events[NR_VM_EVENT_ITEMS];
3668 struct mem_cgroup *mi;
3669 int cpu, i;
3670
3671 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3672 events[i] = 0;
3673
3674 for_each_online_cpu(cpu)
3675 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
6c1c2808
SB
3676 events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3677 cpu);
bb65f89b
RG
3678
3679 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3680 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3681 atomic_long_add(events[i], &mi->vmevents[i]);
3682}
3683
84c07d11 3684#ifdef CONFIG_MEMCG_KMEM
567e9ab2 3685static int memcg_online_kmem(struct mem_cgroup *memcg)
d6441637 3686{
bf4f0599 3687 struct obj_cgroup *objcg;
d6441637
VD
3688 int memcg_id;
3689
b313aeee
VD
3690 if (cgroup_memory_nokmem)
3691 return 0;
3692
2a4db7eb 3693 BUG_ON(memcg->kmemcg_id >= 0);
567e9ab2 3694 BUG_ON(memcg->kmem_state);
d6441637 3695
f3bb3043 3696 memcg_id = memcg_alloc_cache_id();
0b8f73e1
JW
3697 if (memcg_id < 0)
3698 return memcg_id;
d6441637 3699
bf4f0599
RG
3700 objcg = obj_cgroup_alloc();
3701 if (!objcg) {
3702 memcg_free_cache_id(memcg_id);
3703 return -ENOMEM;
3704 }
3705 objcg->memcg = memcg;
3706 rcu_assign_pointer(memcg->objcg, objcg);
3707
d648bcc7
RG
3708 static_branch_enable(&memcg_kmem_enabled_key);
3709
d6441637 3710 /*
567e9ab2 3711 * A memory cgroup is considered kmem-online as soon as it gets
900a38f0 3712 * kmemcg_id. Setting the id after enabling static branching will
d6441637
VD
3713 * guarantee no one starts accounting before all call sites are
3714 * patched.
3715 */
900a38f0 3716 memcg->kmemcg_id = memcg_id;
567e9ab2 3717 memcg->kmem_state = KMEM_ONLINE;
0b8f73e1
JW
3718
3719 return 0;
d6441637
VD
3720}
3721
8e0a8912
JW
3722static void memcg_offline_kmem(struct mem_cgroup *memcg)
3723{
3724 struct cgroup_subsys_state *css;
3725 struct mem_cgroup *parent, *child;
3726 int kmemcg_id;
3727
3728 if (memcg->kmem_state != KMEM_ONLINE)
3729 return;
9855609b 3730
8e0a8912
JW
3731 memcg->kmem_state = KMEM_ALLOCATED;
3732
8e0a8912
JW
3733 parent = parent_mem_cgroup(memcg);
3734 if (!parent)
3735 parent = root_mem_cgroup;
3736
bf4f0599 3737 memcg_reparent_objcgs(memcg, parent);
fb2f2b0a
RG
3738
3739 kmemcg_id = memcg->kmemcg_id;
3740 BUG_ON(kmemcg_id < 0);
3741
8e0a8912
JW
3742 /*
3743 * Change kmemcg_id of this cgroup and all its descendants to the
3744 * parent's id, and then move all entries from this cgroup's list_lrus
3745 * to ones of the parent. After we have finished, all list_lrus
3746 * corresponding to this cgroup are guaranteed to remain empty. The
3747 * ordering is imposed by list_lru_node->lock taken by
3748 * memcg_drain_all_list_lrus().
3749 */
3a06bb78 3750 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
8e0a8912
JW
3751 css_for_each_descendant_pre(css, &memcg->css) {
3752 child = mem_cgroup_from_css(css);
3753 BUG_ON(child->kmemcg_id != kmemcg_id);
3754 child->kmemcg_id = parent->kmemcg_id;
3755 if (!memcg->use_hierarchy)
3756 break;
3757 }
3a06bb78
TH
3758 rcu_read_unlock();
3759
9bec5c35 3760 memcg_drain_all_list_lrus(kmemcg_id, parent);
8e0a8912
JW
3761
3762 memcg_free_cache_id(kmemcg_id);
3763}
3764
3765static void memcg_free_kmem(struct mem_cgroup *memcg)
3766{
0b8f73e1
JW
3767 /* css_alloc() failed, offlining didn't happen */
3768 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3769 memcg_offline_kmem(memcg);
8e0a8912 3770}
d6441637 3771#else
0b8f73e1 3772static int memcg_online_kmem(struct mem_cgroup *memcg)
127424c8
JW
3773{
3774 return 0;
3775}
3776static void memcg_offline_kmem(struct mem_cgroup *memcg)
3777{
3778}
3779static void memcg_free_kmem(struct mem_cgroup *memcg)
3780{
3781}
84c07d11 3782#endif /* CONFIG_MEMCG_KMEM */
127424c8 3783
bbec2e15
RG
3784static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3785 unsigned long max)
d6441637 3786{
b313aeee 3787 int ret;
127424c8 3788
bbec2e15
RG
3789 mutex_lock(&memcg_max_mutex);
3790 ret = page_counter_set_max(&memcg->kmem, max);
3791 mutex_unlock(&memcg_max_mutex);
127424c8 3792 return ret;
d6441637 3793}
510fc4e1 3794
bbec2e15 3795static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
d55f90bf
VD
3796{
3797 int ret;
3798
bbec2e15 3799 mutex_lock(&memcg_max_mutex);
d55f90bf 3800
bbec2e15 3801 ret = page_counter_set_max(&memcg->tcpmem, max);
d55f90bf
VD
3802 if (ret)
3803 goto out;
3804
0db15298 3805 if (!memcg->tcpmem_active) {
d55f90bf
VD
3806 /*
3807 * The active flag needs to be written after the static_key
3808 * update. This is what guarantees that the socket activation
2d758073
JW
3809 * function is the last one to run. See mem_cgroup_sk_alloc()
3810 * for details, and note that we don't mark any socket as
3811 * belonging to this memcg until that flag is up.
d55f90bf
VD
3812 *
3813 * We need to do this, because static_keys will span multiple
3814 * sites, but we can't control their order. If we mark a socket
3815 * as accounted, but the accounting functions are not patched in
3816 * yet, we'll lose accounting.
3817 *
2d758073 3818 * We never race with the readers in mem_cgroup_sk_alloc(),
d55f90bf
VD
3819 * because when this value changes, the code to process it is not
3820 * patched in yet.
3821 */
3822 static_branch_inc(&memcg_sockets_enabled_key);
0db15298 3823 memcg->tcpmem_active = true;
d55f90bf
VD
3824 }
3825out:
bbec2e15 3826 mutex_unlock(&memcg_max_mutex);
d55f90bf
VD
3827 return ret;
3828}
d55f90bf 3829
628f4235
KH
3830/*
3831 * The primary user of this function is the RES_LIMIT file;
3832 * it also handles RES_SOFT_LIMIT writes.
3833 */
451af504
TH
3834static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3835 char *buf, size_t nbytes, loff_t off)
8cdea7c0 3836{
451af504 3837 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 3838 unsigned long nr_pages;
628f4235
KH
3839 int ret;
3840
451af504 3841 buf = strstrip(buf);
650c5e56 3842 ret = page_counter_memparse(buf, "-1", &nr_pages);
3e32cb2e
JW
3843 if (ret)
3844 return ret;
af36f906 3845
3e32cb2e 3846 switch (MEMFILE_ATTR(of_cft(of)->private)) {
628f4235 3847 case RES_LIMIT:
4b3bde4c
BS
3848 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3849 ret = -EINVAL;
3850 break;
3851 }
3e32cb2e
JW
3852 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3853 case _MEM:
bbec2e15 3854 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
8c7c6e34 3855 break;
3e32cb2e 3856 case _MEMSWAP:
bbec2e15 3857 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
296c81d8 3858 break;
3e32cb2e 3859 case _KMEM:
0158115f
MH
3860 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3861 "Please report your usecase to linux-mm@kvack.org if you "
3862 "depend on this functionality.\n");
bbec2e15 3863 ret = memcg_update_kmem_max(memcg, nr_pages);
3e32cb2e 3864 break;
d55f90bf 3865 case _TCP:
bbec2e15 3866 ret = memcg_update_tcp_max(memcg, nr_pages);
d55f90bf 3867 break;
3e32cb2e 3868 }
296c81d8 3869 break;
3e32cb2e
JW
3870 case RES_SOFT_LIMIT:
3871 memcg->soft_limit = nr_pages;
3872 ret = 0;
628f4235
KH
3873 break;
3874 }
451af504 3875 return ret ?: nbytes;
8cdea7c0
BS
3876}
3877
6770c64e
TH
3878static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3879 size_t nbytes, loff_t off)
c84872e1 3880{
6770c64e 3881 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 3882 struct page_counter *counter;
c84872e1 3883
3e32cb2e
JW
3884 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3885 case _MEM:
3886 counter = &memcg->memory;
3887 break;
3888 case _MEMSWAP:
3889 counter = &memcg->memsw;
3890 break;
3891 case _KMEM:
3892 counter = &memcg->kmem;
3893 break;
d55f90bf 3894 case _TCP:
0db15298 3895 counter = &memcg->tcpmem;
d55f90bf 3896 break;
3e32cb2e
JW
3897 default:
3898 BUG();
3899 }
af36f906 3900
3e32cb2e 3901 switch (MEMFILE_ATTR(of_cft(of)->private)) {
29f2a4da 3902 case RES_MAX_USAGE:
3e32cb2e 3903 page_counter_reset_watermark(counter);
29f2a4da
PE
3904 break;
3905 case RES_FAILCNT:
3e32cb2e 3906 counter->failcnt = 0;
29f2a4da 3907 break;
3e32cb2e
JW
3908 default:
3909 BUG();
29f2a4da 3910 }
f64c3f54 3911
6770c64e 3912 return nbytes;
c84872e1
PE
3913}
3914
182446d0 3915static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
7dc74be0
DN
3916 struct cftype *cft)
3917{
182446d0 3918 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
7dc74be0
DN
3919}
3920
02491447 3921#ifdef CONFIG_MMU
182446d0 3922static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
7dc74be0
DN
3923 struct cftype *cft, u64 val)
3924{
182446d0 3925 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7dc74be0 3926
1dfab5ab 3927 if (val & ~MOVE_MASK)
7dc74be0 3928 return -EINVAL;
ee5e8472 3929
7dc74be0 3930 /*
ee5e8472
GC
3931 * No locking is needed here, because ->can_attach() will
3932 * check this value once at the beginning of the process, and then carry
3933 * on with stale data. This means that changes to this value will only
3934 * affect task migrations starting after the change.
7dc74be0 3935 */
c0ff4b85 3936 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
3937 return 0;
3938}
02491447 3939#else
182446d0 3940static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
02491447
DN
3941 struct cftype *cft, u64 val)
3942{
3943 return -ENOSYS;
3944}
3945#endif
7dc74be0 3946
406eb0c9 3947#ifdef CONFIG_NUMA
113b7dfd
JW
3948
3949#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3950#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3951#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
3952
3953static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6 3954 int nid, unsigned int lru_mask, bool tree)
113b7dfd 3955{
867e5e1d 3956 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
113b7dfd
JW
3957 unsigned long nr = 0;
3958 enum lru_list lru;
3959
3960 VM_BUG_ON((unsigned)nid >= nr_node_ids);
3961
3962 for_each_lru(lru) {
3963 if (!(BIT(lru) & lru_mask))
3964 continue;
dd8657b6
SB
3965 if (tree)
3966 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3967 else
3968 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
113b7dfd
JW
3969 }
3970 return nr;
3971}
3972
3973static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6
SB
3974 unsigned int lru_mask,
3975 bool tree)
113b7dfd
JW
3976{
3977 unsigned long nr = 0;
3978 enum lru_list lru;
3979
3980 for_each_lru(lru) {
3981 if (!(BIT(lru) & lru_mask))
3982 continue;
dd8657b6
SB
3983 if (tree)
3984 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3985 else
3986 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
113b7dfd
JW
3987 }
3988 return nr;
3989}
3990
2da8ca82 3991static int memcg_numa_stat_show(struct seq_file *m, void *v)
406eb0c9 3992{
25485de6
GT
3993 struct numa_stat {
3994 const char *name;
3995 unsigned int lru_mask;
3996 };
3997
3998 static const struct numa_stat stats[] = {
3999 { "total", LRU_ALL },
4000 { "file", LRU_ALL_FILE },
4001 { "anon", LRU_ALL_ANON },
4002 { "unevictable", BIT(LRU_UNEVICTABLE) },
4003 };
4004 const struct numa_stat *stat;
406eb0c9 4005 int nid;
aa9694bb 4006 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
406eb0c9 4007
25485de6 4008 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4009 seq_printf(m, "%s=%lu", stat->name,
4010 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4011 false));
4012 for_each_node_state(nid, N_MEMORY)
4013 seq_printf(m, " N%d=%lu", nid,
4014 mem_cgroup_node_nr_lru_pages(memcg, nid,
4015 stat->lru_mask, false));
25485de6 4016 seq_putc(m, '\n');
406eb0c9 4017 }
406eb0c9 4018
071aee13 4019 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4020
4021 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4022 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4023 true));
4024 for_each_node_state(nid, N_MEMORY)
4025 seq_printf(m, " N%d=%lu", nid,
4026 mem_cgroup_node_nr_lru_pages(memcg, nid,
4027 stat->lru_mask, true));
071aee13 4028 seq_putc(m, '\n');
406eb0c9 4029 }
406eb0c9 4030
406eb0c9
YH
4031 return 0;
4032}
4033#endif /* CONFIG_NUMA */
4034
c8713d0b 4035static const unsigned int memcg1_stats[] = {
0d1c2072 4036 NR_FILE_PAGES,
be5d0a74 4037 NR_ANON_MAPPED,
468c3982
JW
4038#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4039 NR_ANON_THPS,
4040#endif
c8713d0b
JW
4041 NR_SHMEM,
4042 NR_FILE_MAPPED,
4043 NR_FILE_DIRTY,
4044 NR_WRITEBACK,
4045 MEMCG_SWAP,
4046};
4047
4048static const char *const memcg1_stat_names[] = {
4049 "cache",
4050 "rss",
468c3982 4051#ifdef CONFIG_TRANSPARENT_HUGEPAGE
c8713d0b 4052 "rss_huge",
468c3982 4053#endif
c8713d0b
JW
4054 "shmem",
4055 "mapped_file",
4056 "dirty",
4057 "writeback",
4058 "swap",
4059};
4060
df0e53d0 4061/* Universal VM events that cgroup1 shows, in the original sort order */
8dd53fd3 4062static const unsigned int memcg1_events[] = {
df0e53d0
JW
4063 PGPGIN,
4064 PGPGOUT,
4065 PGFAULT,
4066 PGMAJFAULT,
4067};
4068
2da8ca82 4069static int memcg_stat_show(struct seq_file *m, void *v)
d2ceb9b7 4070{
aa9694bb 4071 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3e32cb2e 4072 unsigned long memory, memsw;
af7c4b0e
JW
4073 struct mem_cgroup *mi;
4074 unsigned int i;
406eb0c9 4075
71cd3113 4076 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
70bc068c 4077
71cd3113 4078 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
468c3982
JW
4079 unsigned long nr;
4080
71cd3113 4081 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
1dd3a273 4082 continue;
468c3982
JW
4083 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4084#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4085 if (memcg1_stats[i] == NR_ANON_THPS)
4086 nr *= HPAGE_PMD_NR;
4087#endif
4088 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
1dd3a273 4089 }
7b854121 4090
df0e53d0 4091 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
ebc5d83d 4092 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
205b20cc 4093 memcg_events_local(memcg, memcg1_events[i]));
af7c4b0e
JW
4094
4095 for (i = 0; i < NR_LRU_LISTS; i++)
ebc5d83d 4096 seq_printf(m, "%s %lu\n", lru_list_name(i),
205b20cc 4097 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
21d89d15 4098 PAGE_SIZE);
af7c4b0e 4099
14067bb3 4100 /* Hierarchical information */
3e32cb2e
JW
4101 memory = memsw = PAGE_COUNTER_MAX;
4102 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
15b42562
CD
4103 memory = min(memory, READ_ONCE(mi->memory.max));
4104 memsw = min(memsw, READ_ONCE(mi->memsw.max));
fee7b548 4105 }
3e32cb2e
JW
4106 seq_printf(m, "hierarchical_memory_limit %llu\n",
4107 (u64)memory * PAGE_SIZE);
7941d214 4108 if (do_memsw_account())
3e32cb2e
JW
4109 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4110 (u64)memsw * PAGE_SIZE);
7f016ee8 4111
8de7ecc6 4112 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
71cd3113 4113 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
1dd3a273 4114 continue;
8de7ecc6 4115 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
dd923990
YS
4116 (u64)memcg_page_state(memcg, memcg1_stats[i]) *
4117 PAGE_SIZE);
af7c4b0e
JW
4118 }
4119
8de7ecc6 4120 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
ebc5d83d
KK
4121 seq_printf(m, "total_%s %llu\n",
4122 vm_event_name(memcg1_events[i]),
dd923990 4123 (u64)memcg_events(memcg, memcg1_events[i]));
af7c4b0e 4124
8de7ecc6 4125 for (i = 0; i < NR_LRU_LISTS; i++)
ebc5d83d 4126 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
42a30035
JW
4127 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4128 PAGE_SIZE);
14067bb3 4129
7f016ee8 4130#ifdef CONFIG_DEBUG_VM
7f016ee8 4131 {
ef8f2327
MG
4132 pg_data_t *pgdat;
4133 struct mem_cgroup_per_node *mz;
1431d4d1
JW
4134 unsigned long anon_cost = 0;
4135 unsigned long file_cost = 0;
7f016ee8 4136
ef8f2327
MG
4137 for_each_online_pgdat(pgdat) {
4138 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
7f016ee8 4139
1431d4d1
JW
4140 anon_cost += mz->lruvec.anon_cost;
4141 file_cost += mz->lruvec.file_cost;
ef8f2327 4142 }
1431d4d1
JW
4143 seq_printf(m, "anon_cost %lu\n", anon_cost);
4144 seq_printf(m, "file_cost %lu\n", file_cost);
7f016ee8
KM
4145 }
4146#endif
4147
d2ceb9b7
KH
4148 return 0;
4149}
4150
182446d0
TH
4151static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4152 struct cftype *cft)
a7885eb8 4153{
182446d0 4154 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4155
1f4c025b 4156 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
4157}
4158
182446d0
TH
4159static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4160 struct cftype *cft, u64 val)
a7885eb8 4161{
182446d0 4162 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4163
3dae7fec 4164 if (val > 100)
a7885eb8
KM
4165 return -EINVAL;
4166
14208b0e 4167 if (css->parent)
3dae7fec
JW
4168 memcg->swappiness = val;
4169 else
4170 vm_swappiness = val;
068b38c1 4171
a7885eb8
KM
4172 return 0;
4173}
4174
2e72b634
KS
4175static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4176{
4177 struct mem_cgroup_threshold_ary *t;
3e32cb2e 4178 unsigned long usage;
2e72b634
KS
4179 int i;
4180
4181 rcu_read_lock();
4182 if (!swap)
2c488db2 4183 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 4184 else
2c488db2 4185 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
4186
4187 if (!t)
4188 goto unlock;
4189
ce00a967 4190 usage = mem_cgroup_usage(memcg, swap);
2e72b634
KS
4191
4192 /*
748dad36 4193 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
4194 * If that is not the case, a threshold was crossed after the last
4195 * call of __mem_cgroup_threshold().
4196 */
5407a562 4197 i = t->current_threshold;
2e72b634
KS
4198
4199 /*
4200 * Iterate backward over array of thresholds starting from
4201 * current_threshold and check if a threshold is crossed.
4202 * If none of thresholds below usage is crossed, we read
4203 * only one element of the array here.
4204 */
4205 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4206 eventfd_signal(t->entries[i].eventfd, 1);
4207
4208 /* i = current_threshold + 1 */
4209 i++;
4210
4211 /*
4212 * Iterate forward over array of thresholds starting from
4213 * current_threshold+1 and check if a threshold is crossed.
4214 * If none of thresholds above usage is crossed, we read
4215 * only one element of the array here.
4216 */
4217 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4218 eventfd_signal(t->entries[i].eventfd, 1);
4219
4220 /* Update current_threshold */
5407a562 4221 t->current_threshold = i - 1;
2e72b634
KS
4222unlock:
4223 rcu_read_unlock();
4224}
4225
4226static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4227{
ad4ca5f4
KS
4228 while (memcg) {
4229 __mem_cgroup_threshold(memcg, false);
7941d214 4230 if (do_memsw_account())
ad4ca5f4
KS
4231 __mem_cgroup_threshold(memcg, true);
4232
4233 memcg = parent_mem_cgroup(memcg);
4234 }
2e72b634
KS
4235}
4236
4237static int compare_thresholds(const void *a, const void *b)
4238{
4239 const struct mem_cgroup_threshold *_a = a;
4240 const struct mem_cgroup_threshold *_b = b;
4241
2bff24a3
GT
4242 if (_a->threshold > _b->threshold)
4243 return 1;
4244
4245 if (_a->threshold < _b->threshold)
4246 return -1;
4247
4248 return 0;
2e72b634
KS
4249}
4250
c0ff4b85 4251static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
4252{
4253 struct mem_cgroup_eventfd_list *ev;
4254
2bcf2e92
MH
4255 spin_lock(&memcg_oom_lock);
4256
c0ff4b85 4257 list_for_each_entry(ev, &memcg->oom_notify, list)
9490ff27 4258 eventfd_signal(ev->eventfd, 1);
2bcf2e92
MH
4259
4260 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4261 return 0;
4262}
4263
c0ff4b85 4264static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 4265{
7d74b06f
KH
4266 struct mem_cgroup *iter;
4267
c0ff4b85 4268 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 4269 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
4270}
4271
59b6f873 4272static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87 4273 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
2e72b634 4274{
2c488db2
KS
4275 struct mem_cgroup_thresholds *thresholds;
4276 struct mem_cgroup_threshold_ary *new;
3e32cb2e
JW
4277 unsigned long threshold;
4278 unsigned long usage;
2c488db2 4279 int i, size, ret;
2e72b634 4280
650c5e56 4281 ret = page_counter_memparse(args, "-1", &threshold);
2e72b634
KS
4282 if (ret)
4283 return ret;
4284
4285 mutex_lock(&memcg->thresholds_lock);
2c488db2 4286
05b84301 4287 if (type == _MEM) {
2c488db2 4288 thresholds = &memcg->thresholds;
ce00a967 4289 usage = mem_cgroup_usage(memcg, false);
05b84301 4290 } else if (type == _MEMSWAP) {
2c488db2 4291 thresholds = &memcg->memsw_thresholds;
ce00a967 4292 usage = mem_cgroup_usage(memcg, true);
05b84301 4293 } else
2e72b634
KS
4294 BUG();
4295
2e72b634 4296 /* Check if a threshold crossed before adding a new one */
2c488db2 4297 if (thresholds->primary)
2e72b634
KS
4298 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4299
2c488db2 4300 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
4301
4302 /* Allocate memory for new array of thresholds */
67b8046f 4303 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
2c488db2 4304 if (!new) {
2e72b634
KS
4305 ret = -ENOMEM;
4306 goto unlock;
4307 }
2c488db2 4308 new->size = size;
2e72b634
KS
4309
4310 /* Copy thresholds (if any) to new array */
e90342e6
GS
4311 if (thresholds->primary)
4312 memcpy(new->entries, thresholds->primary->entries,
4313 flex_array_size(new, entries, size - 1));
2c488db2 4314
2e72b634 4315 /* Add new threshold */
2c488db2
KS
4316 new->entries[size - 1].eventfd = eventfd;
4317 new->entries[size - 1].threshold = threshold;
2e72b634
KS
4318
4319 /* Sort thresholds. Registering of new threshold isn't time-critical */
61e604e6 4320 sort(new->entries, size, sizeof(*new->entries),
2e72b634
KS
4321 compare_thresholds, NULL);
4322
4323 /* Find current threshold */
2c488db2 4324 new->current_threshold = -1;
2e72b634 4325 for (i = 0; i < size; i++) {
748dad36 4326 if (new->entries[i].threshold <= usage) {
2e72b634 4327 /*
2c488db2
KS
4328 * new->current_threshold will not be used until
4329 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
4330 * it here.
4331 */
2c488db2 4332 ++new->current_threshold;
748dad36
SZ
4333 } else
4334 break;
2e72b634
KS
4335 }
4336
2c488db2
KS
4337 /* Free old spare buffer and save old primary buffer as spare */
4338 kfree(thresholds->spare);
4339 thresholds->spare = thresholds->primary;
4340
4341 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4342
907860ed 4343 /* To be sure that nobody uses thresholds */
2e72b634
KS
4344 synchronize_rcu();
4345
2e72b634
KS
4346unlock:
4347 mutex_unlock(&memcg->thresholds_lock);
4348
4349 return ret;
4350}
4351
59b6f873 4352static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4353 struct eventfd_ctx *eventfd, const char *args)
4354{
59b6f873 4355 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
347c4a87
TH
4356}
4357
59b6f873 4358static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4359 struct eventfd_ctx *eventfd, const char *args)
4360{
59b6f873 4361 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
347c4a87
TH
4362}
4363
59b6f873 4364static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87 4365 struct eventfd_ctx *eventfd, enum res_type type)
2e72b634 4366{
2c488db2
KS
4367 struct mem_cgroup_thresholds *thresholds;
4368 struct mem_cgroup_threshold_ary *new;
3e32cb2e 4369 unsigned long usage;
7d36665a 4370 int i, j, size, entries;
2e72b634
KS
4371
4372 mutex_lock(&memcg->thresholds_lock);
05b84301
JW
4373
4374 if (type == _MEM) {
2c488db2 4375 thresholds = &memcg->thresholds;
ce00a967 4376 usage = mem_cgroup_usage(memcg, false);
05b84301 4377 } else if (type == _MEMSWAP) {
2c488db2 4378 thresholds = &memcg->memsw_thresholds;
ce00a967 4379 usage = mem_cgroup_usage(memcg, true);
05b84301 4380 } else
2e72b634
KS
4381 BUG();
4382
371528ca
AV
4383 if (!thresholds->primary)
4384 goto unlock;
4385
2e72b634
KS
4386 /* Check if a threshold crossed before removing */
4387 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4388
4389 /* Calculate new number of threshold */
7d36665a 4390 size = entries = 0;
2c488db2
KS
4391 for (i = 0; i < thresholds->primary->size; i++) {
4392 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634 4393 size++;
7d36665a
CX
4394 else
4395 entries++;
2e72b634
KS
4396 }
4397
2c488db2 4398 new = thresholds->spare;
907860ed 4399
7d36665a
CX
4400 /* If no items related to eventfd have been cleared, nothing to do */
4401 if (!entries)
4402 goto unlock;
4403
2e72b634
KS
4404 /* Set thresholds array to NULL if we don't have thresholds */
4405 if (!size) {
2c488db2
KS
4406 kfree(new);
4407 new = NULL;
907860ed 4408 goto swap_buffers;
2e72b634
KS
4409 }
4410
2c488db2 4411 new->size = size;
2e72b634
KS
4412
4413 /* Copy thresholds and find current threshold */
2c488db2
KS
4414 new->current_threshold = -1;
4415 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4416 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
4417 continue;
4418
2c488db2 4419 new->entries[j] = thresholds->primary->entries[i];
748dad36 4420 if (new->entries[j].threshold <= usage) {
2e72b634 4421 /*
2c488db2 4422 * new->current_threshold will not be used
2e72b634
KS
4423 * until rcu_assign_pointer(), so it's safe to increment
4424 * it here.
4425 */
2c488db2 4426 ++new->current_threshold;
2e72b634
KS
4427 }
4428 j++;
4429 }
4430
907860ed 4431swap_buffers:
2c488db2
KS
4432 /* Swap primary and spare array */
4433 thresholds->spare = thresholds->primary;
8c757763 4434
2c488db2 4435 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4436
907860ed 4437 /* To be sure that nobody uses thresholds */
2e72b634 4438 synchronize_rcu();
6611d8d7
MC
4439
4440 /* If all events are unregistered, free the spare array */
4441 if (!new) {
4442 kfree(thresholds->spare);
4443 thresholds->spare = NULL;
4444 }
371528ca 4445unlock:
2e72b634 4446 mutex_unlock(&memcg->thresholds_lock);
2e72b634 4447}
c1e862c1 4448
59b6f873 4449static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4450 struct eventfd_ctx *eventfd)
4451{
59b6f873 4452 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
347c4a87
TH
4453}
4454
59b6f873 4455static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4456 struct eventfd_ctx *eventfd)
4457{
59b6f873 4458 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
347c4a87
TH
4459}
4460
59b6f873 4461static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
347c4a87 4462 struct eventfd_ctx *eventfd, const char *args)
9490ff27 4463{
9490ff27 4464 struct mem_cgroup_eventfd_list *event;
9490ff27 4465
9490ff27
KH
4466 event = kmalloc(sizeof(*event), GFP_KERNEL);
4467 if (!event)
4468 return -ENOMEM;
4469
1af8efe9 4470 spin_lock(&memcg_oom_lock);
9490ff27
KH
4471
4472 event->eventfd = eventfd;
4473 list_add(&event->list, &memcg->oom_notify);
4474
4475 /* already in OOM ? */
c2b42d3c 4476 if (memcg->under_oom)
9490ff27 4477 eventfd_signal(eventfd, 1);
1af8efe9 4478 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4479
4480 return 0;
4481}
4482
59b6f873 4483static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
347c4a87 4484 struct eventfd_ctx *eventfd)
9490ff27 4485{
9490ff27 4486 struct mem_cgroup_eventfd_list *ev, *tmp;
9490ff27 4487
1af8efe9 4488 spin_lock(&memcg_oom_lock);
9490ff27 4489
c0ff4b85 4490 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
4491 if (ev->eventfd == eventfd) {
4492 list_del(&ev->list);
4493 kfree(ev);
4494 }
4495 }
4496
1af8efe9 4497 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4498}
4499
2da8ca82 4500static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3c11ecf4 4501{
aa9694bb 4502 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
3c11ecf4 4503
791badbd 4504 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
c2b42d3c 4505 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
fe6bdfc8
RG
4506 seq_printf(sf, "oom_kill %lu\n",
4507 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
3c11ecf4
KH
4508 return 0;
4509}
4510
182446d0 4511static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3c11ecf4
KH
4512 struct cftype *cft, u64 val)
4513{
182446d0 4514 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3c11ecf4
KH
4515
4516 /* cannot set to root cgroup and only 0 and 1 are allowed */
14208b0e 4517 if (!css->parent || !((val == 0) || (val == 1)))
3c11ecf4
KH
4518 return -EINVAL;
4519
c0ff4b85 4520 memcg->oom_kill_disable = val;
4d845ebf 4521 if (!val)
c0ff4b85 4522 memcg_oom_recover(memcg);
3dae7fec 4523
3c11ecf4
KH
4524 return 0;
4525}
4526
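/*
 * Editorial illustration, not part of memcontrol.c: a hedged userspace
 * sketch of registering for the OOM notifications delivered by
 * mem_cgroup_oom_notify() above.  Registration goes through the legacy
 * cgroup.event_control file ("<event_fd> <control_fd>"), which ends up in
 * mem_cgroup_oom_register_event().  The cgroup path is an assumption made
 * for the example.
 */
#if 0	/* example only; never built as part of the kernel */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/fs/cgroup/memory/demo";
	char path[256], line[64];
	int efd, cfd, ctl;
	uint64_t cnt;

	efd = eventfd(0, 0);
	snprintf(path, sizeof(path), "%s/memory.oom_control", dir);
	cfd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
	ctl = open(path, O_WRONLY);
	if (efd < 0 || cfd < 0 || ctl < 0)
		return 1;

	/* Register: eventfd + fd of memory.oom_control, no extra args. */
	snprintf(line, sizeof(line), "%d %d", efd, cfd);
	if (write(ctl, line, strlen(line)) < 0)
		return 1;

	/* Blocks until the cgroup hits OOM; cnt is the number of events. */
	if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
		printf("memcg OOM fired %llu time(s)\n", (unsigned long long)cnt);
	return 0;
}
#endif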
52ebea74
TH
4527#ifdef CONFIG_CGROUP_WRITEBACK
4528
3a8e9ac8
TH
4529#include <trace/events/writeback.h>
4530
841710aa
TH
4531static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4532{
4533 return wb_domain_init(&memcg->cgwb_domain, gfp);
4534}
4535
4536static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4537{
4538 wb_domain_exit(&memcg->cgwb_domain);
4539}
4540
2529bb3a
TH
4541static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4542{
4543 wb_domain_size_changed(&memcg->cgwb_domain);
4544}
4545
841710aa
TH
4546struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4547{
4548 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4549
4550 if (!memcg->css.parent)
4551 return NULL;
4552
4553 return &memcg->cgwb_domain;
4554}
4555
0b3d6e6f
GT
4556/*
4557 * idx can be of type enum memcg_stat_item or node_stat_item.
4558 * Keep in sync with memcg_exact_page().
4559 */
4560static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4561{
871789d4 4562 long x = atomic_long_read(&memcg->vmstats[idx]);
0b3d6e6f
GT
4563 int cpu;
4564
4565 for_each_online_cpu(cpu)
871789d4 4566 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
0b3d6e6f
GT
4567 if (x < 0)
4568 x = 0;
4569 return x;
4570}
4571
c2aa723a
TH
4572/**
4573 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4574 * @wb: bdi_writeback in question
c5edf9cd
TH
4575 * @pfilepages: out parameter for number of file pages
4576 * @pheadroom: out parameter for number of allocatable pages according to memcg
c2aa723a
TH
4577 * @pdirty: out parameter for number of dirty pages
4578 * @pwriteback: out parameter for number of pages under writeback
4579 *
c5edf9cd
TH
4580 * Determine the numbers of file, headroom, dirty, and writeback pages in
4581 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4582 * is a bit more involved.
c2aa723a 4583 *
c5edf9cd
TH
4584 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4585 * headroom is calculated as the lowest headroom of itself and the
4586 * ancestors. Note that this doesn't consider the actual amount of
4587 * available memory in the system. The caller should further cap
4588 * *@pheadroom accordingly.
c2aa723a 4589 */
c5edf9cd
TH
4590void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4591 unsigned long *pheadroom, unsigned long *pdirty,
4592 unsigned long *pwriteback)
c2aa723a
TH
4593{
4594 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4595 struct mem_cgroup *parent;
c2aa723a 4596
0b3d6e6f 4597 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
c2aa723a 4598
0b3d6e6f 4599 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
21d89d15
JW
4600 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4601 memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
c5edf9cd 4602 *pheadroom = PAGE_COUNTER_MAX;
c2aa723a 4603
c2aa723a 4604 while ((parent = parent_mem_cgroup(memcg))) {
15b42562 4605 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
d1663a90 4606 READ_ONCE(memcg->memory.high));
c2aa723a
TH
4607 unsigned long used = page_counter_read(&memcg->memory);
4608
c5edf9cd 4609 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
c2aa723a
TH
4610 memcg = parent;
4611 }
c2aa723a
TH
4612}
4613
97b27821
TH
4614/*
4615 * Foreign dirty flushing
4616 *
4617 * There's an inherent mismatch between memcg and writeback. The former
4618 * tracks ownership per-page while the latter per-inode. This was a
4619 * deliberate design decision because honoring per-page ownership in the
4620 * writeback path is complicated, may lead to higher CPU and IO overheads
4621 * and deemed unnecessary given that write-sharing an inode across
4622 * different cgroups isn't a common use-case.
4623 *
4624 * Combined with inode majority-writer ownership switching, this works well
4625 * enough in most cases but there are some pathological cases. For
4626 * example, let's say there are two cgroups A and B which keep writing to
4627 * different but confined parts of the same inode. B owns the inode and
4628 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4629 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4630 * triggering background writeback. A will be slowed down without a way to
4631 * make writeback of the dirty pages happen.
4632 *
4633 * Conditions like the above can lead to a cgroup getting repeatedly and
4634 * severely throttled after making some progress during each
4635 * dirty_expire_interval while the underlying IO device is almost
4636 * completely idle.
4637 *
4638 * Solving this problem completely requires matching the ownership tracking
4639 * granularities between memcg and writeback in either direction. However,
4640 * the more egregious behaviors can be avoided by simply remembering the
4641 * most recent foreign dirtying events and initiating remote flushes on
4642 * them when local writeback isn't enough to keep the memory clean enough.
4643 *
4644 * The following two functions implement such mechanism. When a foreign
4645 * page - a page whose memcg and writeback ownerships don't match - is
4646 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4647 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4648 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4649 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4650 * foreign bdi_writebacks which haven't expired. Both the numbers of
4651 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4652 * limited to MEMCG_CGWB_FRN_CNT.
4653 *
4654 * The mechanism only remembers IDs and doesn't hold any object references.
4655 * As being wrong occasionally doesn't matter, updates and accesses to the
4656 * records are lockless and racy.
4657 */
4658void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4659 struct bdi_writeback *wb)
4660{
4661 struct mem_cgroup *memcg = page->mem_cgroup;
4662 struct memcg_cgwb_frn *frn;
4663 u64 now = get_jiffies_64();
4664 u64 oldest_at = now;
4665 int oldest = -1;
4666 int i;
4667
3a8e9ac8
TH
4668 trace_track_foreign_dirty(page, wb);
4669
97b27821
TH
4670 /*
4671 * Pick the slot to use. If there is already a slot for @wb, keep
4672 * using it. If not, replace the oldest one which isn't being
4673 * written out.
4674 */
4675 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4676 frn = &memcg->cgwb_frn[i];
4677 if (frn->bdi_id == wb->bdi->id &&
4678 frn->memcg_id == wb->memcg_css->id)
4679 break;
4680 if (time_before64(frn->at, oldest_at) &&
4681 atomic_read(&frn->done.cnt) == 1) {
4682 oldest = i;
4683 oldest_at = frn->at;
4684 }
4685 }
4686
4687 if (i < MEMCG_CGWB_FRN_CNT) {
4688 /*
4689 * Re-using an existing one. Update timestamp lazily to
4690 * avoid making the cacheline hot. We want them to be
4691 * reasonably up-to-date and significantly shorter than
4692 * dirty_expire_interval as that's what expires the record.
4693 * Use the shorter of 1s and dirty_expire_interval / 8.
4694 */
4695 unsigned long update_intv =
4696 min_t(unsigned long, HZ,
4697 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4698
4699 if (time_before64(frn->at, now - update_intv))
4700 frn->at = now;
4701 } else if (oldest >= 0) {
4702 /* replace the oldest free one */
4703 frn = &memcg->cgwb_frn[oldest];
4704 frn->bdi_id = wb->bdi->id;
4705 frn->memcg_id = wb->memcg_css->id;
4706 frn->at = now;
4707 }
4708}
4709
4710/* issue foreign writeback flushes for recorded foreign dirtying events */
4711void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4712{
4713 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4714 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4715 u64 now = jiffies_64;
4716 int i;
4717
4718 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4719 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4720
4721 /*
4722 * If the record is older than dirty_expire_interval,
4723 * writeback on it has already started. No need to kick it
4724 * off again. Also, don't start a new one if there's
4725 * already one in flight.
4726 */
4727 if (time_after64(frn->at, now - intv) &&
4728 atomic_read(&frn->done.cnt) == 1) {
4729 frn->at = 0;
3a8e9ac8 4730 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
97b27821
TH
4731 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4732 WB_REASON_FOREIGN_FLUSH,
4733 &frn->done);
4734 }
4735 }
4736}
4737
841710aa
TH
4738#else /* CONFIG_CGROUP_WRITEBACK */
4739
4740static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4741{
4742 return 0;
4743}
4744
4745static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4746{
4747}
4748
2529bb3a
TH
4749static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4750{
4751}
4752
52ebea74
TH
4753#endif /* CONFIG_CGROUP_WRITEBACK */
4754
3bc942f3
TH
4755/*
4756 * DO NOT USE IN NEW FILES.
4757 *
4758 * "cgroup.event_control" implementation.
4759 *
4760 * This is way over-engineered. It tries to support fully configurable
4761 * events for each user. Such a level of flexibility is completely
4762 * unnecessary, especially in light of the planned unified hierarchy.
4763 *
4764 * Please deprecate this and replace with something simpler if at all
4765 * possible.
4766 */
4767
79bd9814
TH
4768/*
4769 * Unregister event and free resources.
4770 *
4771 * Gets called from workqueue.
4772 */
3bc942f3 4773static void memcg_event_remove(struct work_struct *work)
79bd9814 4774{
3bc942f3
TH
4775 struct mem_cgroup_event *event =
4776 container_of(work, struct mem_cgroup_event, remove);
59b6f873 4777 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
4778
4779 remove_wait_queue(event->wqh, &event->wait);
4780
59b6f873 4781 event->unregister_event(memcg, event->eventfd);
79bd9814
TH
4782
4783 /* Notify userspace the event is going away. */
4784 eventfd_signal(event->eventfd, 1);
4785
4786 eventfd_ctx_put(event->eventfd);
4787 kfree(event);
59b6f873 4788 css_put(&memcg->css);
79bd9814
TH
4789}
4790
4791/*
a9a08845 4792 * Gets called on EPOLLHUP on eventfd when user closes it.
79bd9814
TH
4793 *
4794 * Called with wqh->lock held and interrupts disabled.
4795 */
ac6424b9 4796static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3bc942f3 4797 int sync, void *key)
79bd9814 4798{
3bc942f3
TH
4799 struct mem_cgroup_event *event =
4800 container_of(wait, struct mem_cgroup_event, wait);
59b6f873 4801 struct mem_cgroup *memcg = event->memcg;
3ad6f93e 4802 __poll_t flags = key_to_poll(key);
79bd9814 4803
a9a08845 4804 if (flags & EPOLLHUP) {
79bd9814
TH
4805 /*
4806 * If the event has been detached at cgroup removal, we
4807 * can simply return knowing the other side will cleanup
4808 * for us.
4809 *
4810 * We can't race against event freeing since the other
4811 * side will require wqh->lock via remove_wait_queue(),
4812 * which we hold.
4813 */
fba94807 4814 spin_lock(&memcg->event_list_lock);
79bd9814
TH
4815 if (!list_empty(&event->list)) {
4816 list_del_init(&event->list);
4817 /*
4818 * We are in atomic context, but cgroup_event_remove()
4819 * may sleep, so we have to call it in workqueue.
4820 */
4821 schedule_work(&event->remove);
4822 }
fba94807 4823 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
4824 }
4825
4826 return 0;
4827}
4828
3bc942f3 4829static void memcg_event_ptable_queue_proc(struct file *file,
79bd9814
TH
4830 wait_queue_head_t *wqh, poll_table *pt)
4831{
3bc942f3
TH
4832 struct mem_cgroup_event *event =
4833 container_of(pt, struct mem_cgroup_event, pt);
79bd9814
TH
4834
4835 event->wqh = wqh;
4836 add_wait_queue(wqh, &event->wait);
4837}
4838
4839/*
3bc942f3
TH
4840 * DO NOT USE IN NEW FILES.
4841 *
79bd9814
TH
4842 * Parse input and register new cgroup event handler.
4843 *
4844 * Input must be in format '<event_fd> <control_fd> <args>'.
4845 * Interpretation of args is defined by control file implementation.
4846 */
451af504
TH
4847static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4848 char *buf, size_t nbytes, loff_t off)
79bd9814 4849{
451af504 4850 struct cgroup_subsys_state *css = of_css(of);
fba94807 4851 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 4852 struct mem_cgroup_event *event;
79bd9814
TH
4853 struct cgroup_subsys_state *cfile_css;
4854 unsigned int efd, cfd;
4855 struct fd efile;
4856 struct fd cfile;
fba94807 4857 const char *name;
79bd9814
TH
4858 char *endp;
4859 int ret;
4860
451af504
TH
4861 buf = strstrip(buf);
4862
4863 efd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
4864 if (*endp != ' ')
4865 return -EINVAL;
451af504 4866 buf = endp + 1;
79bd9814 4867
451af504 4868 cfd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
4869 if ((*endp != ' ') && (*endp != '\0'))
4870 return -EINVAL;
451af504 4871 buf = endp + 1;
79bd9814
TH
4872
4873 event = kzalloc(sizeof(*event), GFP_KERNEL);
4874 if (!event)
4875 return -ENOMEM;
4876
59b6f873 4877 event->memcg = memcg;
79bd9814 4878 INIT_LIST_HEAD(&event->list);
3bc942f3
TH
4879 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4880 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4881 INIT_WORK(&event->remove, memcg_event_remove);
79bd9814
TH
4882
4883 efile = fdget(efd);
4884 if (!efile.file) {
4885 ret = -EBADF;
4886 goto out_kfree;
4887 }
4888
4889 event->eventfd = eventfd_ctx_fileget(efile.file);
4890 if (IS_ERR(event->eventfd)) {
4891 ret = PTR_ERR(event->eventfd);
4892 goto out_put_efile;
4893 }
4894
4895 cfile = fdget(cfd);
4896 if (!cfile.file) {
4897 ret = -EBADF;
4898 goto out_put_eventfd;
4899 }
4900
4901 /* the process needs read permission on the control file */
4902 /* AV: shouldn't we check that it's been opened for read instead? */
4903 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4904 if (ret < 0)
4905 goto out_put_cfile;
4906
fba94807
TH
4907 /*
4908 * Determine the event callbacks and set them in @event. This used
4909 * to be done via struct cftype but cgroup core no longer knows
4910 * about these events. The following is crude but the whole thing
4911 * is for compatibility anyway.
3bc942f3
TH
4912 *
4913 * DO NOT ADD NEW FILES.
fba94807 4914 */
b583043e 4915 name = cfile.file->f_path.dentry->d_name.name;
fba94807
TH
4916
4917 if (!strcmp(name, "memory.usage_in_bytes")) {
4918 event->register_event = mem_cgroup_usage_register_event;
4919 event->unregister_event = mem_cgroup_usage_unregister_event;
4920 } else if (!strcmp(name, "memory.oom_control")) {
4921 event->register_event = mem_cgroup_oom_register_event;
4922 event->unregister_event = mem_cgroup_oom_unregister_event;
4923 } else if (!strcmp(name, "memory.pressure_level")) {
4924 event->register_event = vmpressure_register_event;
4925 event->unregister_event = vmpressure_unregister_event;
4926 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
347c4a87
TH
4927 event->register_event = memsw_cgroup_usage_register_event;
4928 event->unregister_event = memsw_cgroup_usage_unregister_event;
fba94807
TH
4929 } else {
4930 ret = -EINVAL;
4931 goto out_put_cfile;
4932 }
4933
79bd9814 4934 /*
b5557c4c
TH
4935 * Verify @cfile should belong to @css. Also, remaining events are
4936 * automatically removed on cgroup destruction but the removal is
4937 * asynchronous, so take an extra ref on @css.
79bd9814 4938 */
b583043e 4939 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
ec903c0c 4940 &memory_cgrp_subsys);
79bd9814 4941 ret = -EINVAL;
5a17f543 4942 if (IS_ERR(cfile_css))
79bd9814 4943 goto out_put_cfile;
5a17f543
TH
4944 if (cfile_css != css) {
4945 css_put(cfile_css);
79bd9814 4946 goto out_put_cfile;
5a17f543 4947 }
79bd9814 4948
451af504 4949 ret = event->register_event(memcg, event->eventfd, buf);
79bd9814
TH
4950 if (ret)
4951 goto out_put_css;
4952
9965ed17 4953 vfs_poll(efile.file, &event->pt);
79bd9814 4954
fba94807
TH
4955 spin_lock(&memcg->event_list_lock);
4956 list_add(&event->list, &memcg->event_list);
4957 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
4958
4959 fdput(cfile);
4960 fdput(efile);
4961
451af504 4962 return nbytes;
79bd9814
TH
4963
4964out_put_css:
b5557c4c 4965 css_put(css);
79bd9814
TH
4966out_put_cfile:
4967 fdput(cfile);
4968out_put_eventfd:
4969 eventfd_ctx_put(event->eventfd);
4970out_put_efile:
4971 fdput(efile);
4972out_kfree:
4973 kfree(event);
4974
4975 return ret;
4976}
4977
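/*
 * Editorial illustration, not part of memcontrol.c: a hedged userspace
 * sketch of the "<event_fd> <control_fd> <args>" protocol parsed by
 * memcg_write_event_control() above, here arming a usage threshold that
 * lands in mem_cgroup_usage_register_event().  The cgroup path and the 64M
 * threshold are placeholders.
 */
#if 0	/* example only; never built as part of the kernel */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/fs/cgroup/memory/demo";
	char path[256], line[80];
	int efd, cfd, ctl;
	uint64_t cnt;

	efd = eventfd(0, 0);
	snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", dir);
	cfd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
	ctl = open(path, O_WRONLY);
	if (efd < 0 || cfd < 0 || ctl < 0)
		return 1;

	/* Arm a notification when usage crosses 64M in either direction. */
	snprintf(line, sizeof(line), "%d %d %llu", efd, cfd, 64ULL << 20);
	if (write(ctl, line, strlen(line)) < 0)
		return 1;

	if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
		printf("usage threshold crossed (%llu event(s))\n",
		       (unsigned long long)cnt);
	return 0;
}
#endif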
241994ed 4978static struct cftype mem_cgroup_legacy_files[] = {
8cdea7c0 4979 {
0eea1030 4980 .name = "usage_in_bytes",
8c7c6e34 4981 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
791badbd 4982 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 4983 },
c84872e1
PE
4984 {
4985 .name = "max_usage_in_bytes",
8c7c6e34 4986 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6770c64e 4987 .write = mem_cgroup_reset,
791badbd 4988 .read_u64 = mem_cgroup_read_u64,
c84872e1 4989 },
8cdea7c0 4990 {
0eea1030 4991 .name = "limit_in_bytes",
8c7c6e34 4992 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
451af504 4993 .write = mem_cgroup_write,
791badbd 4994 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 4995 },
296c81d8
BS
4996 {
4997 .name = "soft_limit_in_bytes",
4998 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
451af504 4999 .write = mem_cgroup_write,
791badbd 5000 .read_u64 = mem_cgroup_read_u64,
296c81d8 5001 },
8cdea7c0
BS
5002 {
5003 .name = "failcnt",
8c7c6e34 5004 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6770c64e 5005 .write = mem_cgroup_reset,
791badbd 5006 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5007 },
d2ceb9b7
KH
5008 {
5009 .name = "stat",
2da8ca82 5010 .seq_show = memcg_stat_show,
d2ceb9b7 5011 },
c1e862c1
KH
5012 {
5013 .name = "force_empty",
6770c64e 5014 .write = mem_cgroup_force_empty_write,
c1e862c1 5015 },
18f59ea7
BS
5016 {
5017 .name = "use_hierarchy",
5018 .write_u64 = mem_cgroup_hierarchy_write,
5019 .read_u64 = mem_cgroup_hierarchy_read,
5020 },
79bd9814 5021 {
3bc942f3 5022 .name = "cgroup.event_control", /* XXX: for compat */
451af504 5023 .write = memcg_write_event_control,
7dbdb199 5024 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
79bd9814 5025 },
a7885eb8
KM
5026 {
5027 .name = "swappiness",
5028 .read_u64 = mem_cgroup_swappiness_read,
5029 .write_u64 = mem_cgroup_swappiness_write,
5030 },
7dc74be0
DN
5031 {
5032 .name = "move_charge_at_immigrate",
5033 .read_u64 = mem_cgroup_move_charge_read,
5034 .write_u64 = mem_cgroup_move_charge_write,
5035 },
9490ff27
KH
5036 {
5037 .name = "oom_control",
2da8ca82 5038 .seq_show = mem_cgroup_oom_control_read,
3c11ecf4 5039 .write_u64 = mem_cgroup_oom_control_write,
9490ff27
KH
5040 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5041 },
70ddf637
AV
5042 {
5043 .name = "pressure_level",
70ddf637 5044 },
406eb0c9
YH
5045#ifdef CONFIG_NUMA
5046 {
5047 .name = "numa_stat",
2da8ca82 5048 .seq_show = memcg_numa_stat_show,
406eb0c9
YH
5049 },
5050#endif
510fc4e1
GC
5051 {
5052 .name = "kmem.limit_in_bytes",
5053 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
451af504 5054 .write = mem_cgroup_write,
791badbd 5055 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5056 },
5057 {
5058 .name = "kmem.usage_in_bytes",
5059 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
791badbd 5060 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5061 },
5062 {
5063 .name = "kmem.failcnt",
5064 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6770c64e 5065 .write = mem_cgroup_reset,
791badbd 5066 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5067 },
5068 {
5069 .name = "kmem.max_usage_in_bytes",
5070 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6770c64e 5071 .write = mem_cgroup_reset,
791badbd 5072 .read_u64 = mem_cgroup_read_u64,
510fc4e1 5073 },
a87425a3
YS
5074#if defined(CONFIG_MEMCG_KMEM) && \
5075 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
749c5415
GC
5076 {
5077 .name = "kmem.slabinfo",
b047501c 5078 .seq_show = memcg_slab_show,
749c5415
GC
5079 },
5080#endif
d55f90bf
VD
5081 {
5082 .name = "kmem.tcp.limit_in_bytes",
5083 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5084 .write = mem_cgroup_write,
5085 .read_u64 = mem_cgroup_read_u64,
5086 },
5087 {
5088 .name = "kmem.tcp.usage_in_bytes",
5089 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5090 .read_u64 = mem_cgroup_read_u64,
5091 },
5092 {
5093 .name = "kmem.tcp.failcnt",
5094 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5095 .write = mem_cgroup_reset,
5096 .read_u64 = mem_cgroup_read_u64,
5097 },
5098 {
5099 .name = "kmem.tcp.max_usage_in_bytes",
5100 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5101 .write = mem_cgroup_reset,
5102 .read_u64 = mem_cgroup_read_u64,
5103 },
6bc10349 5104 { }, /* terminate */
af36f906 5105};
8c7c6e34 5106
73f576c0
JW
5107/*
5108 * Private memory cgroup IDR
5109 *
5110 * Swap-out records and page cache shadow entries need to store memcg
5111 * references in constrained space, so we maintain an ID space that is
5112 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5113 * memory-controlled cgroups to 64k.
5114 *
b8f2935f 5115 * However, there usually are many references to the offline CSS after
73f576c0
JW
5116 * the cgroup has been destroyed, such as page cache or reclaimable
5117 * slab objects, that don't need to hang on to the ID. We want to keep
5118 * those dead CSS from occupying IDs, or we might quickly exhaust the
5119 * relatively small ID space and prevent the creation of new cgroups
5120 * even when there are far fewer than 64k cgroups - possibly none.
5121 *
5122 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5123 * be freed and recycled when it's no longer needed, which is usually
5124 * when the CSS is offlined.
5125 *
5126 * The only exception to that are records of swapped out tmpfs/shmem
5127 * pages that need to be attributed to live ancestors on swapin. But
5128 * those references are manageable from userspace.
5129 */
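/*
 * For example, a swap-out record keeps only the 16-bit ID returned by
 * mem_cgroup_id(); on swapin the owning memcg is recovered roughly as:
 *
 *	id = lookup_swap_cgroup_id(entry);
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	rcu_read_unlock();
 *
 * which is why an ID has to stay valid for as long as such records can
 * still refer to it, even after the cgroup itself has gone offline.
 */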
5130
5131static DEFINE_IDR(mem_cgroup_idr);
5132
7e97de0b
KT
5133static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5134{
5135 if (memcg->id.id > 0) {
5136 idr_remove(&mem_cgroup_idr, memcg->id.id);
5137 memcg->id.id = 0;
5138 }
5139}
5140
c1514c0a
VF
5141static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5142 unsigned int n)
73f576c0 5143{
1c2d479a 5144 refcount_add(n, &memcg->id.ref);
73f576c0
JW
5145}
5146
615d66c3 5147static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
73f576c0 5148{
1c2d479a 5149 if (refcount_sub_and_test(n, &memcg->id.ref)) {
7e97de0b 5150 mem_cgroup_id_remove(memcg);
73f576c0
JW
5151
5152 /* Memcg ID pins CSS */
5153 css_put(&memcg->css);
5154 }
5155}
5156
615d66c3
VD
5157static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5158{
5159 mem_cgroup_id_put_many(memcg, 1);
5160}
5161
73f576c0
JW
5162/**
5163 * mem_cgroup_from_id - look up a memcg from a memcg id
5164 * @id: the memcg id to look up
5165 *
5166 * Caller must hold rcu_read_lock().
5167 */
5168struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5169{
5170 WARN_ON_ONCE(!rcu_read_lock_held());
5171 return idr_find(&mem_cgroup_idr, id);
5172}
5173
ef8f2327 5174static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
5175{
5176 struct mem_cgroup_per_node *pn;
ef8f2327 5177 int tmp = node;
1ecaab2b
KH
5178 /*
5179 * This routine is called against possible nodes.
5180 * But it's a BUG to call kmalloc() against an offline node.
5181 *
5182 * TODO: this routine can waste a lot of memory for nodes which will
5183 * never be onlined. It's better to use a memory hotplug callback
5184 * function.
5185 */
41e3355d
KH
5186 if (!node_state(node, N_NORMAL_MEMORY))
5187 tmp = -1;
17295c88 5188 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
5189 if (!pn)
5190 return 1;
1ecaab2b 5191
3e38e0aa
RG
5192 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5193 GFP_KERNEL_ACCOUNT);
815744d7
JW
5194 if (!pn->lruvec_stat_local) {
5195 kfree(pn);
5196 return 1;
5197 }
5198
3e38e0aa
RG
5199 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
5200 GFP_KERNEL_ACCOUNT);
a983b5eb 5201 if (!pn->lruvec_stat_cpu) {
815744d7 5202 free_percpu(pn->lruvec_stat_local);
00f3ca2c
JW
5203 kfree(pn);
5204 return 1;
5205 }
5206
ef8f2327
MG
5207 lruvec_init(&pn->lruvec);
5208 pn->usage_in_excess = 0;
5209 pn->on_tree = false;
5210 pn->memcg = memcg;
5211
54f72fe0 5212 memcg->nodeinfo[node] = pn;
6d12e2d8
KH
5213 return 0;
5214}
5215
ef8f2327 5216static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
1ecaab2b 5217{
00f3ca2c
JW
5218 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5219
4eaf431f
MH
5220 if (!pn)
5221 return;
5222
a983b5eb 5223 free_percpu(pn->lruvec_stat_cpu);
815744d7 5224 free_percpu(pn->lruvec_stat_local);
00f3ca2c 5225 kfree(pn);
1ecaab2b
KH
5226}
5227
40e952f9 5228static void __mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 5229{
c8b2a36f 5230 int node;
59927fb9 5231
c8b2a36f 5232 for_each_node(node)
ef8f2327 5233 free_mem_cgroup_per_node_info(memcg, node);
871789d4 5234 free_percpu(memcg->vmstats_percpu);
815744d7 5235 free_percpu(memcg->vmstats_local);
8ff69e2c 5236 kfree(memcg);
59927fb9 5237}
3afe36b1 5238
40e952f9
TE
5239static void mem_cgroup_free(struct mem_cgroup *memcg)
5240{
5241 memcg_wb_domain_exit(memcg);
7961eee3
SB
5242 /*
5243 * Flush percpu vmstats and vmevents to guarantee that the values are
5244 * correct at the parent's and all ancestor levels.
5245 */
4a87e2a2 5246 memcg_flush_percpu_vmstats(memcg);
7961eee3 5247 memcg_flush_percpu_vmevents(memcg);
40e952f9
TE
5248 __mem_cgroup_free(memcg);
5249}
5250
0b8f73e1 5251static struct mem_cgroup *mem_cgroup_alloc(void)
8cdea7c0 5252{
d142e3e6 5253 struct mem_cgroup *memcg;
b9726c26 5254 unsigned int size;
6d12e2d8 5255 int node;
97b27821 5256 int __maybe_unused i;
11d67612 5257 long error = -ENOMEM;
8cdea7c0 5258
0b8f73e1
JW
5259 size = sizeof(struct mem_cgroup);
5260 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5261
5262 memcg = kzalloc(size, GFP_KERNEL);
c0ff4b85 5263 if (!memcg)
11d67612 5264 return ERR_PTR(error);
0b8f73e1 5265
73f576c0
JW
5266 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5267 1, MEM_CGROUP_ID_MAX,
5268 GFP_KERNEL);
11d67612
YS
5269 if (memcg->id.id < 0) {
5270 error = memcg->id.id;
73f576c0 5271 goto fail;
11d67612 5272 }
73f576c0 5273
3e38e0aa
RG
5274 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5275 GFP_KERNEL_ACCOUNT);
815744d7
JW
5276 if (!memcg->vmstats_local)
5277 goto fail;
5278
3e38e0aa
RG
5279 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5280 GFP_KERNEL_ACCOUNT);
871789d4 5281 if (!memcg->vmstats_percpu)
0b8f73e1 5282 goto fail;
78fb7466 5283
3ed28fa1 5284 for_each_node(node)
ef8f2327 5285 if (alloc_mem_cgroup_per_node_info(memcg, node))
0b8f73e1 5286 goto fail;
f64c3f54 5287
0b8f73e1
JW
5288 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5289 goto fail;
28dbc4b6 5290
f7e1cb6e 5291 INIT_WORK(&memcg->high_work, high_work_func);
d142e3e6 5292 INIT_LIST_HEAD(&memcg->oom_notify);
d142e3e6
GC
5293 mutex_init(&memcg->thresholds_lock);
5294 spin_lock_init(&memcg->move_lock);
70ddf637 5295 vmpressure_init(&memcg->vmpressure);
fba94807
TH
5296 INIT_LIST_HEAD(&memcg->event_list);
5297 spin_lock_init(&memcg->event_list_lock);
d886f4e4 5298 memcg->socket_pressure = jiffies;
84c07d11 5299#ifdef CONFIG_MEMCG_KMEM
900a38f0 5300 memcg->kmemcg_id = -1;
bf4f0599 5301 INIT_LIST_HEAD(&memcg->objcg_list);
900a38f0 5302#endif
52ebea74
TH
5303#ifdef CONFIG_CGROUP_WRITEBACK
5304 INIT_LIST_HEAD(&memcg->cgwb_list);
97b27821
TH
5305 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5306 memcg->cgwb_frn[i].done =
5307 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
87eaceb3
YS
5308#endif
5309#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5310 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5311 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5312 memcg->deferred_split_queue.split_queue_len = 0;
52ebea74 5313#endif
73f576c0 5314 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
0b8f73e1
JW
5315 return memcg;
5316fail:
7e97de0b 5317 mem_cgroup_id_remove(memcg);
40e952f9 5318 __mem_cgroup_free(memcg);
11d67612 5319 return ERR_PTR(error);
d142e3e6
GC
5320}
5321
0b8f73e1
JW
5322static struct cgroup_subsys_state * __ref
5323mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
d142e3e6 5324{
0b8f73e1 5325 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
b87d8cef 5326 struct mem_cgroup *memcg, *old_memcg;
0b8f73e1 5327 long error = -ENOMEM;
d142e3e6 5328
b87d8cef 5329 old_memcg = set_active_memcg(parent);
0b8f73e1 5330 memcg = mem_cgroup_alloc();
b87d8cef 5331 set_active_memcg(old_memcg);
11d67612
YS
5332 if (IS_ERR(memcg))
5333 return ERR_CAST(memcg);
d142e3e6 5334
d1663a90 5335 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
0b8f73e1 5336 memcg->soft_limit = PAGE_COUNTER_MAX;
4b82ab4f 5337 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
0b8f73e1
JW
5338 if (parent) {
5339 memcg->swappiness = mem_cgroup_swappiness(parent);
5340 memcg->oom_kill_disable = parent->oom_kill_disable;
5341 }
5342 if (parent && parent->use_hierarchy) {
5343 memcg->use_hierarchy = true;
3e32cb2e 5344 page_counter_init(&memcg->memory, &parent->memory);
37e84351 5345 page_counter_init(&memcg->swap, &parent->swap);
3e32cb2e 5346 page_counter_init(&memcg->kmem, &parent->kmem);
0db15298 5347 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
18f59ea7 5348 } else {
3e32cb2e 5349 page_counter_init(&memcg->memory, NULL);
37e84351 5350 page_counter_init(&memcg->swap, NULL);
3e32cb2e 5351 page_counter_init(&memcg->kmem, NULL);
0db15298 5352 page_counter_init(&memcg->tcpmem, NULL);
8c7f6edb
TH
5353 /*
5354 * Deeper hierarchy with use_hierarchy == false doesn't make
5355 * much sense, so let the cgroup subsystem know about this
5356 * unfortunate state in our controller.
5357 */
d142e3e6 5358 if (parent != root_mem_cgroup)
073219e9 5359 memory_cgrp_subsys.broken_hierarchy = true;
18f59ea7 5360 }
d6441637 5361
0b8f73e1
JW
5362 /* The following stuff does not apply to the root */
5363 if (!parent) {
5364 root_mem_cgroup = memcg;
5365 return &memcg->css;
5366 }
5367
b313aeee 5368 error = memcg_online_kmem(memcg);
0b8f73e1
JW
5369 if (error)
5370 goto fail;
127424c8 5371
f7e1cb6e 5372 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5373 static_branch_inc(&memcg_sockets_enabled_key);
f7e1cb6e 5374
0b8f73e1
JW
5375 return &memcg->css;
5376fail:
7e97de0b 5377 mem_cgroup_id_remove(memcg);
0b8f73e1 5378 mem_cgroup_free(memcg);
11d67612 5379 return ERR_PTR(error);
0b8f73e1
JW
5380}
5381
73f576c0 5382static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
0b8f73e1 5383{
58fa2a55
VD
5384 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5385
0a4465d3
KT
5386 /*
5387 * A memcg must be visible for memcg_expand_shrinker_maps()
5388 * by the time the maps are allocated. So, we allocate maps
5389 * here, when for_each_mem_cgroup() can't skip it.
5390 */
5391 if (memcg_alloc_shrinker_maps(memcg)) {
5392 mem_cgroup_id_remove(memcg);
5393 return -ENOMEM;
5394 }
5395
73f576c0 5396 /* Online state pins memcg ID, memcg ID pins CSS */
1c2d479a 5397 refcount_set(&memcg->id.ref, 1);
73f576c0 5398 css_get(css);
2f7dd7a4 5399 return 0;
8cdea7c0
BS
5400}
5401
eb95419b 5402static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
df878fb0 5403{
eb95419b 5404 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5405 struct mem_cgroup_event *event, *tmp;
79bd9814
TH
5406
5407 /*
5408 * Unregister events and notify userspace.
5409 * Notify userspace about cgroup removal only after rmdir of the cgroup
5410 * directory to avoid a race between userspace and kernelspace.
5411 */
fba94807
TH
5412 spin_lock(&memcg->event_list_lock);
5413 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
79bd9814
TH
5414 list_del_init(&event->list);
5415 schedule_work(&event->remove);
5416 }
fba94807 5417 spin_unlock(&memcg->event_list_lock);
ec64f515 5418
bf8d5d52 5419 page_counter_set_min(&memcg->memory, 0);
23067153 5420 page_counter_set_low(&memcg->memory, 0);
63677c74 5421
567e9ab2 5422 memcg_offline_kmem(memcg);
52ebea74 5423 wb_memcg_offline(memcg);
73f576c0 5424
591edfb1
RG
5425 drain_all_stock(memcg);
5426
73f576c0 5427 mem_cgroup_id_put(memcg);
df878fb0
KH
5428}
5429
6df38689
VD
5430static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5431{
5432 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5433
5434 invalidate_reclaim_iterators(memcg);
5435}
5436
eb95419b 5437static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
8cdea7c0 5438{
eb95419b 5439 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
97b27821 5440 int __maybe_unused i;
c268e994 5441
97b27821
TH
5442#ifdef CONFIG_CGROUP_WRITEBACK
5443 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5444 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5445#endif
f7e1cb6e 5446 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5447 static_branch_dec(&memcg_sockets_enabled_key);
127424c8 5448
0db15298 5449 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
d55f90bf 5450 static_branch_dec(&memcg_sockets_enabled_key);
3893e302 5451
0b8f73e1
JW
5452 vmpressure_cleanup(&memcg->vmpressure);
5453 cancel_work_sync(&memcg->high_work);
5454 mem_cgroup_remove_from_trees(memcg);
0a4465d3 5455 memcg_free_shrinker_maps(memcg);
d886f4e4 5456 memcg_free_kmem(memcg);
0b8f73e1 5457 mem_cgroup_free(memcg);
8cdea7c0
BS
5458}
5459
1ced953b
TH
5460/**
5461 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5462 * @css: the target css
5463 *
5464 * Reset the states of the mem_cgroup associated with @css. This is
5465 * invoked when the userland requests disabling on the default hierarchy
5466 * but the memcg is pinned through dependency. The memcg should stop
5467 * applying policies and should revert to the vanilla state as it may be
5468 * made visible again.
5469 *
5470 * The current implementation only resets the essential configurations.
5471 * This needs to be expanded to cover all the visible parts.
5472 */
5473static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5474{
5475 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5476
bbec2e15
RG
5477 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5478 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
bbec2e15
RG
5479 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5480 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
bf8d5d52 5481 page_counter_set_min(&memcg->memory, 0);
23067153 5482 page_counter_set_low(&memcg->memory, 0);
d1663a90 5483 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
24d404dc 5484 memcg->soft_limit = PAGE_COUNTER_MAX;
4b82ab4f 5485 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
2529bb3a 5486 memcg_wb_domain_size_changed(memcg);
1ced953b
TH
5487}
5488
02491447 5489#ifdef CONFIG_MMU
7dc74be0 5490/* Handlers for move charge at task migration. */
854ffa8d 5491static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 5492{
05b84301 5493 int ret;
9476db97 5494
d0164adc
MG
5495 /* Try a single bulk charge without reclaim first, kswapd may wake */
5496 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
9476db97 5497 if (!ret) {
854ffa8d 5498 mc.precharge += count;
854ffa8d
DN
5499 return ret;
5500 }
9476db97 5501
3674534b 5502 /* Try charges one by one with reclaim, but do not retry */
854ffa8d 5503 while (count--) {
3674534b 5504 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
38c5d72f 5505 if (ret)
38c5d72f 5506 return ret;
854ffa8d 5507 mc.precharge++;
9476db97 5508 cond_resched();
854ffa8d 5509 }
9476db97 5510 return 0;
4ffef5fe
DN
5511}
5512
4ffef5fe
DN
5513union mc_target {
5514 struct page *page;
02491447 5515 swp_entry_t ent;
4ffef5fe
DN
5516};
5517
4ffef5fe 5518enum mc_target_type {
8d32ff84 5519 MC_TARGET_NONE = 0,
4ffef5fe 5520 MC_TARGET_PAGE,
02491447 5521 MC_TARGET_SWAP,
c733a828 5522 MC_TARGET_DEVICE,
4ffef5fe
DN
5523};
5524
90254a65
DN
5525static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5526 unsigned long addr, pte_t ptent)
4ffef5fe 5527{
25b2995a 5528 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 5529
90254a65
DN
5530 if (!page || !page_mapped(page))
5531 return NULL;
5532 if (PageAnon(page)) {
1dfab5ab 5533 if (!(mc.flags & MOVE_ANON))
90254a65 5534 return NULL;
1dfab5ab
JW
5535 } else {
5536 if (!(mc.flags & MOVE_FILE))
5537 return NULL;
5538 }
90254a65
DN
5539 if (!get_page_unless_zero(page))
5540 return NULL;
5541
5542 return page;
5543}
5544
c733a828 5545#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
90254a65 5546static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 5547 pte_t ptent, swp_entry_t *entry)
90254a65 5548{
90254a65
DN
5549 struct page *page = NULL;
5550 swp_entry_t ent = pte_to_swp_entry(ptent);
5551
9a137153 5552 if (!(mc.flags & MOVE_ANON))
90254a65 5553 return NULL;
c733a828
JG
5554
5555 /*
5556 * Handle MEMORY_DEVICE_PRIVATE entries, which are ZONE_DEVICE pages
5557 * belonging to a device; because they are not accessible by the CPU,
5558 * they are stored as special swap entries in the CPU page table.
5559 */
5560 if (is_device_private_entry(ent)) {
5561 page = device_private_entry_to_page(ent);
5562 /*
5563 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5564 * a refcount of 1 when free (unlike a normal page)
5565 */
5566 if (!page_ref_add_unless(page, 1, 1))
5567 return NULL;
5568 return page;
5569 }
5570
9a137153
RC
5571 if (non_swap_entry(ent))
5572 return NULL;
5573
4b91355e
KH
5574 /*
5575 * Because lookup_swap_cache() updates some statistics counters,
5576 * we call find_get_page() with swapper_space directly.
5577 */
f6ab1f7f 5578 page = find_get_page(swap_address_space(ent), swp_offset(ent));
2d1c4980 5579 entry->val = ent.val;
90254a65
DN
5580
5581 return page;
5582}
4b91355e
KH
5583#else
5584static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 5585 pte_t ptent, swp_entry_t *entry)
4b91355e
KH
5586{
5587 return NULL;
5588}
5589#endif
90254a65 5590
87946a72
DN
5591static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5592 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5593{
87946a72
DN
5594 if (!vma->vm_file) /* anonymous vma */
5595 return NULL;
1dfab5ab 5596 if (!(mc.flags & MOVE_FILE))
87946a72
DN
5597 return NULL;
5598
87946a72 5599 /* page is moved even if it's not RSS of this task (page-faulted). */
aa3b1895 5600 /* shmem/tmpfs may report page out on swap: account for that too. */
f5df8635
MWO
5601 return find_get_incore_page(vma->vm_file->f_mapping,
5602 linear_page_index(vma, addr));
87946a72
DN
5603}
5604
b1b0deab
CG
5605/**
5606 * mem_cgroup_move_account - move account of the page
5607 * @page: the page
25843c2b 5608 * @compound: charge the page as compound or small page
b1b0deab
CG
5609 * @from: mem_cgroup which the page is moved from.
5610 * @to: mem_cgroup which the page is moved to. @from != @to.
5611 *
3ac808fd 5612 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
b1b0deab
CG
5613 *
5614 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5615 * from old cgroup.
5616 */
5617static int mem_cgroup_move_account(struct page *page,
f627c2f5 5618 bool compound,
b1b0deab
CG
5619 struct mem_cgroup *from,
5620 struct mem_cgroup *to)
5621{
ae8af438
KK
5622 struct lruvec *from_vec, *to_vec;
5623 struct pglist_data *pgdat;
6c357848 5624 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
b1b0deab
CG
5625 int ret;
5626
5627 VM_BUG_ON(from == to);
5628 VM_BUG_ON_PAGE(PageLRU(page), page);
f627c2f5 5629 VM_BUG_ON(compound && !PageTransHuge(page));
b1b0deab
CG
5630
5631 /*
6a93ca8f 5632 * Prevent mem_cgroup_migrate() from looking at
45637bab 5633 * page->mem_cgroup of its source page while we change it.
b1b0deab 5634 */
f627c2f5 5635 ret = -EBUSY;
b1b0deab
CG
5636 if (!trylock_page(page))
5637 goto out;
5638
5639 ret = -EINVAL;
5640 if (page->mem_cgroup != from)
5641 goto out_unlock;
5642
ae8af438 5643 pgdat = page_pgdat(page);
867e5e1d
JW
5644 from_vec = mem_cgroup_lruvec(from, pgdat);
5645 to_vec = mem_cgroup_lruvec(to, pgdat);
ae8af438 5646
abb242f5 5647 lock_page_memcg(page);
b1b0deab 5648
be5d0a74
JW
5649 if (PageAnon(page)) {
5650 if (page_mapped(page)) {
5651 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5652 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
468c3982
JW
5653 if (PageTransHuge(page)) {
5654 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5655 -nr_pages);
5656 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5657 nr_pages);
5658 }
5659
be5d0a74
JW
5660 }
5661 } else {
0d1c2072
JW
5662 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5663 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5664
5665 if (PageSwapBacked(page)) {
5666 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5667 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5668 }
5669
49e50d27
JW
5670 if (page_mapped(page)) {
5671 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5672 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5673 }
b1b0deab 5674
49e50d27
JW
5675 if (PageDirty(page)) {
5676 struct address_space *mapping = page_mapping(page);
c4843a75 5677
f56753ac 5678 if (mapping_can_writeback(mapping)) {
49e50d27
JW
5679 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5680 -nr_pages);
5681 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5682 nr_pages);
5683 }
c4843a75
GT
5684 }
5685 }
5686
b1b0deab 5687 if (PageWriteback(page)) {
ae8af438
KK
5688 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5689 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
b1b0deab
CG
5690 }
5691
5692 /*
abb242f5
JW
5693 * All state has been migrated, let's switch to the new memcg.
5694 *
b1b0deab 5695 * It is safe to change page->mem_cgroup here because the page
abb242f5
JW
5696 * is referenced, charged, isolated, and locked: we can't race
5697 * with (un)charging, migration, LRU putback, or anything else
5698 * that would rely on a stable page->mem_cgroup.
5699 *
5700 * Note that lock_page_memcg is a memcg lock, not a page lock,
5701 * to save space. As soon as we switch page->mem_cgroup to a
5702 * new memcg that isn't locked, the above state can change
5703 * concurrently again. Make sure we're truly done with it.
b1b0deab 5704 */
abb242f5 5705 smp_mb();
b1b0deab 5706
1a3e1f40
JW
5707 css_get(&to->css);
5708 css_put(&from->css);
5709
5710 page->mem_cgroup = to;
87eaceb3 5711
abb242f5 5712 __unlock_page_memcg(from);
b1b0deab
CG
5713
5714 ret = 0;
5715
5716 local_irq_disable();
3fba69a5 5717 mem_cgroup_charge_statistics(to, page, nr_pages);
b1b0deab 5718 memcg_check_events(to, page);
3fba69a5 5719 mem_cgroup_charge_statistics(from, page, -nr_pages);
b1b0deab
CG
5720 memcg_check_events(from, page);
5721 local_irq_enable();
5722out_unlock:
5723 unlock_page(page);
5724out:
5725 return ret;
5726}
5727
7cf7806c
LR
5728/**
5729 * get_mctgt_type - get target type of moving charge
5730 * @vma: the vma the pte to be checked belongs
5731 * @addr: the address corresponding to the pte to be checked
5732 * @ptent: the pte to be checked
5733 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5734 *
5735 * Returns
5736 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5737 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5738 * move charge. If @target is not NULL, the page is stored in target->page
5739 * with an extra refcount taken (callers should handle it).
5740 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5741 * target for charge migration. If @target is not NULL, the entry is stored
5742 * in target->ent.
25b2995a
CH
5743 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
5744 * (so ZONE_DEVICE page and thus not on the lru).
df6ad698
JG
5745 * For now such a page is charged like a regular page would be, as for all
5746 * intents and purposes it is just special memory taking the place of a
5747 * regular page.
c733a828
JG
5748 *
5749 * See Documentation/vm/hmm.txt and include/linux/hmm.h
7cf7806c
LR
5750 *
5751 * Called with pte lock held.
5752 */
5753
8d32ff84 5754static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
5755 unsigned long addr, pte_t ptent, union mc_target *target)
5756{
5757 struct page *page = NULL;
8d32ff84 5758 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
5759 swp_entry_t ent = { .val = 0 };
5760
5761 if (pte_present(ptent))
5762 page = mc_handle_present_pte(vma, addr, ptent);
5763 else if (is_swap_pte(ptent))
48406ef8 5764 page = mc_handle_swap_pte(vma, ptent, &ent);
0661a336 5765 else if (pte_none(ptent))
87946a72 5766 page = mc_handle_file_pte(vma, addr, ptent, &ent);
90254a65
DN
5767
5768 if (!page && !ent.val)
8d32ff84 5769 return ret;
02491447 5770 if (page) {
02491447 5771 /*
0a31bc97 5772 * Do only a loose check w/o serialization.
1306a85a 5773 * mem_cgroup_move_account() checks whether the page is valid
0a31bc97 5774 * under LRU exclusion.
02491447 5775 */
1306a85a 5776 if (page->mem_cgroup == mc.from) {
02491447 5777 ret = MC_TARGET_PAGE;
25b2995a 5778 if (is_device_private_page(page))
c733a828 5779 ret = MC_TARGET_DEVICE;
02491447
DN
5780 if (target)
5781 target->page = page;
5782 }
5783 if (!ret || !target)
5784 put_page(page);
5785 }
3e14a57b
HY
5786 /*
5787 * There is a swap entry and a page doesn't exist or isn't charged.
5788 * But we cannot move a tail-page in a THP.
5789 */
5790 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
34c00c31 5791 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
5792 ret = MC_TARGET_SWAP;
5793 if (target)
5794 target->ent = ent;
4ffef5fe 5795 }
4ffef5fe
DN
5796 return ret;
5797}
5798
12724850
NH
5799#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5800/*
d6810d73
HY
5801 * We don't consider PMD mapped swapping or file mapped pages because THP does
5802 * not support them for now.
12724850
NH
5803 * Caller should make sure that pmd_trans_huge(pmd) is true.
5804 */
5805static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5806 unsigned long addr, pmd_t pmd, union mc_target *target)
5807{
5808 struct page *page = NULL;
12724850
NH
5809 enum mc_target_type ret = MC_TARGET_NONE;
5810
84c3fc4e
ZY
5811 if (unlikely(is_swap_pmd(pmd))) {
5812 VM_BUG_ON(thp_migration_supported() &&
5813 !is_pmd_migration_entry(pmd));
5814 return ret;
5815 }
12724850 5816 page = pmd_page(pmd);
309381fe 5817 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
1dfab5ab 5818 if (!(mc.flags & MOVE_ANON))
12724850 5819 return ret;
1306a85a 5820 if (page->mem_cgroup == mc.from) {
12724850
NH
5821 ret = MC_TARGET_PAGE;
5822 if (target) {
5823 get_page(page);
5824 target->page = page;
5825 }
5826 }
5827 return ret;
5828}
5829#else
5830static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5831 unsigned long addr, pmd_t pmd, union mc_target *target)
5832{
5833 return MC_TARGET_NONE;
5834}
5835#endif
5836
4ffef5fe
DN
5837static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5838 unsigned long addr, unsigned long end,
5839 struct mm_walk *walk)
5840{
26bcd64a 5841 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
5842 pte_t *pte;
5843 spinlock_t *ptl;
5844
b6ec57f4
KS
5845 ptl = pmd_trans_huge_lock(pmd, vma);
5846 if (ptl) {
c733a828
JG
5847 /*
5848 * Note there cannot be MC_TARGET_DEVICE for now as we do not
25b2995a
CH
5849 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5850 * this might change.
c733a828 5851 */
12724850
NH
5852 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5853 mc.precharge += HPAGE_PMD_NR;
bf929152 5854 spin_unlock(ptl);
1a5a9906 5855 return 0;
12724850 5856 }
03319327 5857
45f83cef
AA
5858 if (pmd_trans_unstable(pmd))
5859 return 0;
4ffef5fe
DN
5860 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5861 for (; addr != end; pte++, addr += PAGE_SIZE)
8d32ff84 5862 if (get_mctgt_type(vma, addr, *pte, NULL))
4ffef5fe
DN
5863 mc.precharge++; /* increment precharge temporarily */
5864 pte_unmap_unlock(pte - 1, ptl);
5865 cond_resched();
5866
7dc74be0
DN
5867 return 0;
5868}
5869
7b86ac33
CH
5870static const struct mm_walk_ops precharge_walk_ops = {
5871 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5872};
5873
4ffef5fe
DN
5874static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5875{
5876 unsigned long precharge;
4ffef5fe 5877
d8ed45c5 5878 mmap_read_lock(mm);
7b86ac33 5879 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
d8ed45c5 5880 mmap_read_unlock(mm);
4ffef5fe
DN
5881
5882 precharge = mc.precharge;
5883 mc.precharge = 0;
5884
5885 return precharge;
5886}
5887
4ffef5fe
DN
5888static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5889{
dfe076b0
DN
5890 unsigned long precharge = mem_cgroup_count_precharge(mm);
5891
5892 VM_BUG_ON(mc.moving_task);
5893 mc.moving_task = current;
5894 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
5895}
5896
dfe076b0
DN
5897/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5898static void __mem_cgroup_clear_mc(void)
4ffef5fe 5899{
2bd9bb20
KH
5900 struct mem_cgroup *from = mc.from;
5901 struct mem_cgroup *to = mc.to;
5902
4ffef5fe 5903 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d 5904 if (mc.precharge) {
00501b53 5905 cancel_charge(mc.to, mc.precharge);
854ffa8d
DN
5906 mc.precharge = 0;
5907 }
5908 /*
5909 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5910 * we must uncharge here.
5911 */
5912 if (mc.moved_charge) {
00501b53 5913 cancel_charge(mc.from, mc.moved_charge);
854ffa8d 5914 mc.moved_charge = 0;
4ffef5fe 5915 }
483c30b5
DN
5916 /* we must fixup refcnts and charges */
5917 if (mc.moved_swap) {
483c30b5 5918 /* uncharge swap account from the old cgroup */
ce00a967 5919 if (!mem_cgroup_is_root(mc.from))
3e32cb2e 5920 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
483c30b5 5921
615d66c3
VD
5922 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5923
05b84301 5924 /*
3e32cb2e
JW
5925 * we charged both to->memory and to->memsw, so we
5926 * should uncharge to->memory.
05b84301 5927 */
ce00a967 5928 if (!mem_cgroup_is_root(mc.to))
3e32cb2e
JW
5929 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5930
483c30b5
DN
5931 mc.moved_swap = 0;
5932 }
dfe076b0
DN
5933 memcg_oom_recover(from);
5934 memcg_oom_recover(to);
5935 wake_up_all(&mc.waitq);
5936}
5937
5938static void mem_cgroup_clear_mc(void)
5939{
264a0ae1
TH
5940 struct mm_struct *mm = mc.mm;
5941
dfe076b0
DN
5942 /*
5943 * we must clear moving_task before waking up waiters at the end of
5944 * task migration.
5945 */
5946 mc.moving_task = NULL;
5947 __mem_cgroup_clear_mc();
2bd9bb20 5948 spin_lock(&mc.lock);
4ffef5fe
DN
5949 mc.from = NULL;
5950 mc.to = NULL;
264a0ae1 5951 mc.mm = NULL;
2bd9bb20 5952 spin_unlock(&mc.lock);
264a0ae1
TH
5953
5954 mmput(mm);
4ffef5fe
DN
5955}
5956
1f7dd3e5 5957static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
7dc74be0 5958{
1f7dd3e5 5959 struct cgroup_subsys_state *css;
eed67d75 5960 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
9f2115f9 5961 struct mem_cgroup *from;
4530eddb 5962 struct task_struct *leader, *p;
9f2115f9 5963 struct mm_struct *mm;
1dfab5ab 5964 unsigned long move_flags;
9f2115f9 5965 int ret = 0;
7dc74be0 5966
1f7dd3e5
TH
5967 /* charge immigration isn't supported on the default hierarchy */
5968 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
9f2115f9
TH
5969 return 0;
5970
4530eddb
TH
5971 /*
5972 * Multi-process migrations only happen on the default hierarchy
5973 * where charge immigration is not used. Perform charge
5974 * immigration if @tset contains a leader and whine if there are
5975 * multiple.
5976 */
5977 p = NULL;
1f7dd3e5 5978 cgroup_taskset_for_each_leader(leader, css, tset) {
4530eddb
TH
5979 WARN_ON_ONCE(p);
5980 p = leader;
1f7dd3e5 5981 memcg = mem_cgroup_from_css(css);
4530eddb
TH
5982 }
5983 if (!p)
5984 return 0;
5985
1f7dd3e5
TH
5986 /*
5987 * We are now committed to this value, whatever it is. Changes in this
5988 * tunable will only affect upcoming migrations, not the current one.
5989 * So we need to save it, and keep it going.
5990 */
5991 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5992 if (!move_flags)
5993 return 0;
5994
9f2115f9
TH
5995 from = mem_cgroup_from_task(p);
5996
5997 VM_BUG_ON(from == memcg);
5998
5999 mm = get_task_mm(p);
6000 if (!mm)
6001 return 0;
6002 /* We move charges only when we move an owner of the mm */
6003 if (mm->owner == p) {
6004 VM_BUG_ON(mc.from);
6005 VM_BUG_ON(mc.to);
6006 VM_BUG_ON(mc.precharge);
6007 VM_BUG_ON(mc.moved_charge);
6008 VM_BUG_ON(mc.moved_swap);
6009
6010 spin_lock(&mc.lock);
264a0ae1 6011 mc.mm = mm;
9f2115f9
TH
6012 mc.from = from;
6013 mc.to = memcg;
6014 mc.flags = move_flags;
6015 spin_unlock(&mc.lock);
6016 /* We set mc.moving_task later */
6017
6018 ret = mem_cgroup_precharge_mc(mm);
6019 if (ret)
6020 mem_cgroup_clear_mc();
264a0ae1
TH
6021 } else {
6022 mmput(mm);
7dc74be0
DN
6023 }
6024 return ret;
6025}
6026
1f7dd3e5 6027static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
7dc74be0 6028{
4e2f245d
JW
6029 if (mc.to)
6030 mem_cgroup_clear_mc();
7dc74be0
DN
6031}
6032
4ffef5fe
DN
6033static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6034 unsigned long addr, unsigned long end,
6035 struct mm_walk *walk)
7dc74be0 6036{
4ffef5fe 6037 int ret = 0;
26bcd64a 6038 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
6039 pte_t *pte;
6040 spinlock_t *ptl;
12724850
NH
6041 enum mc_target_type target_type;
6042 union mc_target target;
6043 struct page *page;
4ffef5fe 6044
b6ec57f4
KS
6045 ptl = pmd_trans_huge_lock(pmd, vma);
6046 if (ptl) {
62ade86a 6047 if (mc.precharge < HPAGE_PMD_NR) {
bf929152 6048 spin_unlock(ptl);
12724850
NH
6049 return 0;
6050 }
6051 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6052 if (target_type == MC_TARGET_PAGE) {
6053 page = target.page;
6054 if (!isolate_lru_page(page)) {
f627c2f5 6055 if (!mem_cgroup_move_account(page, true,
1306a85a 6056 mc.from, mc.to)) {
12724850
NH
6057 mc.precharge -= HPAGE_PMD_NR;
6058 mc.moved_charge += HPAGE_PMD_NR;
6059 }
6060 putback_lru_page(page);
6061 }
6062 put_page(page);
c733a828
JG
6063 } else if (target_type == MC_TARGET_DEVICE) {
6064 page = target.page;
6065 if (!mem_cgroup_move_account(page, true,
6066 mc.from, mc.to)) {
6067 mc.precharge -= HPAGE_PMD_NR;
6068 mc.moved_charge += HPAGE_PMD_NR;
6069 }
6070 put_page(page);
12724850 6071 }
bf929152 6072 spin_unlock(ptl);
1a5a9906 6073 return 0;
12724850
NH
6074 }
6075
45f83cef
AA
6076 if (pmd_trans_unstable(pmd))
6077 return 0;
4ffef5fe
DN
6078retry:
6079 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6080 for (; addr != end; addr += PAGE_SIZE) {
6081 pte_t ptent = *(pte++);
c733a828 6082 bool device = false;
02491447 6083 swp_entry_t ent;
4ffef5fe
DN
6084
6085 if (!mc.precharge)
6086 break;
6087
8d32ff84 6088 switch (get_mctgt_type(vma, addr, ptent, &target)) {
c733a828
JG
6089 case MC_TARGET_DEVICE:
6090 device = true;
e4a9bc58 6091 fallthrough;
4ffef5fe
DN
6092 case MC_TARGET_PAGE:
6093 page = target.page;
53f9263b
KS
6094 /*
6095 * We can have a part of the split pmd here. Moving it
6096 * can be done, but it would be too convoluted, so simply
6097 * ignore such a partial THP and keep it in the original
6098 * memcg. There should be somebody mapping the head.
6099 */
6100 if (PageTransCompound(page))
6101 goto put;
c733a828 6102 if (!device && isolate_lru_page(page))
4ffef5fe 6103 goto put;
f627c2f5
KS
6104 if (!mem_cgroup_move_account(page, false,
6105 mc.from, mc.to)) {
4ffef5fe 6106 mc.precharge--;
854ffa8d
DN
6107 /* we uncharge from mc.from later. */
6108 mc.moved_charge++;
4ffef5fe 6109 }
c733a828
JG
6110 if (!device)
6111 putback_lru_page(page);
8d32ff84 6112put: /* get_mctgt_type() gets the page */
4ffef5fe
DN
6113 put_page(page);
6114 break;
02491447
DN
6115 case MC_TARGET_SWAP:
6116 ent = target.ent;
e91cbb42 6117 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 6118 mc.precharge--;
8d22a935
HD
6119 mem_cgroup_id_get_many(mc.to, 1);
6120 /* we fixup other refcnts and charges later. */
483c30b5
DN
6121 mc.moved_swap++;
6122 }
02491447 6123 break;
4ffef5fe
DN
6124 default:
6125 break;
6126 }
6127 }
6128 pte_unmap_unlock(pte - 1, ptl);
6129 cond_resched();
6130
6131 if (addr != end) {
6132 /*
6133 * We have consumed all precharges we got in can_attach().
6134 * We try charging one by one, but don't do any additional
6135 * charges to mc.to if we have failed to charge once in the attach()
6136 * phase.
6137 */
854ffa8d 6138 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
6139 if (!ret)
6140 goto retry;
6141 }
6142
6143 return ret;
6144}
6145
7b86ac33
CH
6146static const struct mm_walk_ops charge_walk_ops = {
6147 .pmd_entry = mem_cgroup_move_charge_pte_range,
6148};
6149
264a0ae1 6150static void mem_cgroup_move_charge(void)
4ffef5fe 6151{
4ffef5fe 6152 lru_add_drain_all();
312722cb 6153 /*
81f8c3a4
JW
6154 * Signal lock_page_memcg() to take the memcg's move_lock
6155 * while we're moving its pages to another memcg. Then wait
6156 * for already started RCU-only updates to finish.
312722cb
JW
6157 */
6158 atomic_inc(&mc.from->moving_account);
6159 synchronize_rcu();
dfe076b0 6160retry:
d8ed45c5 6161 if (unlikely(!mmap_read_trylock(mc.mm))) {
dfe076b0 6162 /*
c1e8d7c6 6163 * Someone who is holding the mmap_lock might be waiting in
dfe076b0
DN
6164 * waitq. So we cancel all extra charges, wake up all waiters,
6165 * and retry. Because we cancel precharges, we might not be able
6166 * to move enough charges, but moving charge is a best-effort
6167 * feature anyway, so it wouldn't be a big problem.
6168 */
6169 __mem_cgroup_clear_mc();
6170 cond_resched();
6171 goto retry;
6172 }
26bcd64a
NH
6173 /*
6174 * When we have consumed all precharges and failed in doing
6175 * additional charge, the page walk just aborts.
6176 */
7b86ac33
CH
6177 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6178 NULL);
0247f3f4 6179
d8ed45c5 6180 mmap_read_unlock(mc.mm);
312722cb 6181 atomic_dec(&mc.from->moving_account);
7dc74be0
DN
6182}
6183
264a0ae1 6184static void mem_cgroup_move_task(void)
67e465a7 6185{
264a0ae1
TH
6186 if (mc.to) {
6187 mem_cgroup_move_charge();
a433658c 6188 mem_cgroup_clear_mc();
264a0ae1 6189 }
67e465a7 6190}
5cfb80a7 6191#else /* !CONFIG_MMU */
1f7dd3e5 6192static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6193{
6194 return 0;
6195}
1f7dd3e5 6196static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6197{
6198}
264a0ae1 6199static void mem_cgroup_move_task(void)
5cfb80a7
DN
6200{
6201}
6202#endif
67e465a7 6203
f00baae7
TH
6204/*
6205 * Cgroup retains root cgroups across [un]mount cycles making it necessary
aa6ec29b
TH
6206 * to verify whether we're attached to the default hierarchy on each mount
6207 * attempt.
f00baae7 6208 */
eb95419b 6209static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
f00baae7
TH
6210{
6211 /*
aa6ec29b 6212 * use_hierarchy is forced on the default hierarchy. cgroup core
f00baae7
TH
6213 * guarantees that @root doesn't have any children, so turning it
6214 * on for the root memcg is enough.
6215 */
9e10a130 6216 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7feee590
VD
6217 root_mem_cgroup->use_hierarchy = true;
6218 else
6219 root_mem_cgroup->use_hierarchy = false;
f00baae7
TH
6220}
6221
677dc973
CD
6222static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6223{
6224 if (value == PAGE_COUNTER_MAX)
6225 seq_puts(m, "max\n");
6226 else
6227 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6228
6229 return 0;
6230}
6231
241994ed
JW
6232static u64 memory_current_read(struct cgroup_subsys_state *css,
6233 struct cftype *cft)
6234{
f5fc3c5d
JW
6235 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6236
6237 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
241994ed
JW
6238}
6239
bf8d5d52
RG
6240static int memory_min_show(struct seq_file *m, void *v)
6241{
677dc973
CD
6242 return seq_puts_memcg_tunable(m,
6243 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
bf8d5d52
RG
6244}
6245
6246static ssize_t memory_min_write(struct kernfs_open_file *of,
6247 char *buf, size_t nbytes, loff_t off)
6248{
6249 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6250 unsigned long min;
6251 int err;
6252
6253 buf = strstrip(buf);
6254 err = page_counter_memparse(buf, "max", &min);
6255 if (err)
6256 return err;
6257
6258 page_counter_set_min(&memcg->memory, min);
6259
6260 return nbytes;
6261}
6262
241994ed
JW
6263static int memory_low_show(struct seq_file *m, void *v)
6264{
677dc973
CD
6265 return seq_puts_memcg_tunable(m,
6266 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
241994ed
JW
6267}
6268
6269static ssize_t memory_low_write(struct kernfs_open_file *of,
6270 char *buf, size_t nbytes, loff_t off)
6271{
6272 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6273 unsigned long low;
6274 int err;
6275
6276 buf = strstrip(buf);
d2973697 6277 err = page_counter_memparse(buf, "max", &low);
241994ed
JW
6278 if (err)
6279 return err;
6280
23067153 6281 page_counter_set_low(&memcg->memory, low);
241994ed
JW
6282
6283 return nbytes;
6284}
6285
6286static int memory_high_show(struct seq_file *m, void *v)
6287{
d1663a90
JK
6288 return seq_puts_memcg_tunable(m,
6289 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
241994ed
JW
6290}
6291
6292static ssize_t memory_high_write(struct kernfs_open_file *of,
6293 char *buf, size_t nbytes, loff_t off)
6294{
6295 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 6296 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
8c8c383c 6297 bool drained = false;
241994ed
JW
6298 unsigned long high;
6299 int err;
6300
6301 buf = strstrip(buf);
d2973697 6302 err = page_counter_memparse(buf, "max", &high);
241994ed
JW
6303 if (err)
6304 return err;
6305
8c8c383c
JW
6306 for (;;) {
6307 unsigned long nr_pages = page_counter_read(&memcg->memory);
6308 unsigned long reclaimed;
6309
6310 if (nr_pages <= high)
6311 break;
6312
6313 if (signal_pending(current))
6314 break;
6315
6316 if (!drained) {
6317 drain_all_stock(memcg);
6318 drained = true;
6319 continue;
6320 }
6321
6322 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6323 GFP_KERNEL, true);
6324
6325 if (!reclaimed && !nr_retries--)
6326 break;
6327 }
588083bb 6328
536d3bf2
RG
6329 page_counter_set_high(&memcg->memory, high);
6330
19ce33ac
JW
6331 memcg_wb_domain_size_changed(memcg);
6332
241994ed
JW
6333 return nbytes;
6334}
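/*
 * For example, shrinking the limit from userspace with
 *
 *	echo 512M > memory.high
 *
 * parses the new value above and then attempts, within the retry budget,
 * to reclaim the cgroup down below it before the write returns.
 */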
6335
6336static int memory_max_show(struct seq_file *m, void *v)
6337{
677dc973
CD
6338 return seq_puts_memcg_tunable(m,
6339 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
241994ed
JW
6340}
6341
6342static ssize_t memory_max_write(struct kernfs_open_file *of,
6343 char *buf, size_t nbytes, loff_t off)
6344{
6345 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 6346 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
b6e6edcf 6347 bool drained = false;
241994ed
JW
6348 unsigned long max;
6349 int err;
6350
6351 buf = strstrip(buf);
d2973697 6352 err = page_counter_memparse(buf, "max", &max);
241994ed
JW
6353 if (err)
6354 return err;
6355
bbec2e15 6356 xchg(&memcg->memory.max, max);
b6e6edcf
JW
6357
6358 for (;;) {
6359 unsigned long nr_pages = page_counter_read(&memcg->memory);
6360
6361 if (nr_pages <= max)
6362 break;
6363
7249c9f0 6364 if (signal_pending(current))
b6e6edcf 6365 break;
b6e6edcf
JW
6366
6367 if (!drained) {
6368 drain_all_stock(memcg);
6369 drained = true;
6370 continue;
6371 }
6372
6373 if (nr_reclaims) {
6374 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6375 GFP_KERNEL, true))
6376 nr_reclaims--;
6377 continue;
6378 }
6379
e27be240 6380 memcg_memory_event(memcg, MEMCG_OOM);
b6e6edcf
JW
6381 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6382 break;
6383 }
241994ed 6384
2529bb3a 6385 memcg_wb_domain_size_changed(memcg);
241994ed
JW
6386 return nbytes;
6387}
6388
1e577f97
SB
6389static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6390{
6391 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6392 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6393 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6394 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6395 seq_printf(m, "oom_kill %lu\n",
6396 atomic_long_read(&events[MEMCG_OOM_KILL]));
6397}
6398
241994ed
JW
6399static int memory_events_show(struct seq_file *m, void *v)
6400{
aa9694bb 6401 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 6402
1e577f97
SB
6403 __memory_events_show(m, memcg->memory_events);
6404 return 0;
6405}
6406
6407static int memory_events_local_show(struct seq_file *m, void *v)
6408{
6409 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 6410
1e577f97 6411 __memory_events_show(m, memcg->memory_events_local);
241994ed
JW
6412 return 0;
6413}
6414
587d9f72
JW
6415static int memory_stat_show(struct seq_file *m, void *v)
6416{
aa9694bb 6417 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
c8713d0b 6418 char *buf;
1ff9e6e1 6419
c8713d0b
JW
6420 buf = memory_stat_format(memcg);
6421 if (!buf)
6422 return -ENOMEM;
6423 seq_puts(m, buf);
6424 kfree(buf);
587d9f72
JW
6425 return 0;
6426}
6427
5f9a4f4a
MS
6428#ifdef CONFIG_NUMA
6429static int memory_numa_stat_show(struct seq_file *m, void *v)
6430{
6431 int i;
6432 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6433
6434 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6435 int nid;
6436
6437 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6438 continue;
6439
6440 seq_printf(m, "%s", memory_stats[i].name);
6441 for_each_node_state(nid, N_MEMORY) {
6442 u64 size;
6443 struct lruvec *lruvec;
6444
6445 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6446 size = lruvec_page_state(lruvec, memory_stats[i].idx);
6447 size *= memory_stats[i].ratio;
6448 seq_printf(m, " N%d=%llu", nid, size);
6449 }
6450 seq_putc(m, '\n');
6451 }
6452
6453 return 0;
6454}
6455#endif
6456
3d8b38eb
RG
6457static int memory_oom_group_show(struct seq_file *m, void *v)
6458{
aa9694bb 6459 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3d8b38eb
RG
6460
6461 seq_printf(m, "%d\n", memcg->oom_group);
6462
6463 return 0;
6464}
6465
6466static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6467 char *buf, size_t nbytes, loff_t off)
6468{
6469 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6470 int ret, oom_group;
6471
6472 buf = strstrip(buf);
6473 if (!buf)
6474 return -EINVAL;
6475
6476 ret = kstrtoint(buf, 0, &oom_group);
6477 if (ret)
6478 return ret;
6479
6480 if (oom_group != 0 && oom_group != 1)
6481 return -EINVAL;
6482
6483 memcg->oom_group = oom_group;
6484
6485 return nbytes;
6486}
6487
241994ed
JW
6488static struct cftype memory_files[] = {
6489 {
6490 .name = "current",
f5fc3c5d 6491 .flags = CFTYPE_NOT_ON_ROOT,
241994ed
JW
6492 .read_u64 = memory_current_read,
6493 },
bf8d5d52
RG
6494 {
6495 .name = "min",
6496 .flags = CFTYPE_NOT_ON_ROOT,
6497 .seq_show = memory_min_show,
6498 .write = memory_min_write,
6499 },
241994ed
JW
6500 {
6501 .name = "low",
6502 .flags = CFTYPE_NOT_ON_ROOT,
6503 .seq_show = memory_low_show,
6504 .write = memory_low_write,
6505 },
6506 {
6507 .name = "high",
6508 .flags = CFTYPE_NOT_ON_ROOT,
6509 .seq_show = memory_high_show,
6510 .write = memory_high_write,
6511 },
6512 {
6513 .name = "max",
6514 .flags = CFTYPE_NOT_ON_ROOT,
6515 .seq_show = memory_max_show,
6516 .write = memory_max_write,
6517 },
6518 {
6519 .name = "events",
6520 .flags = CFTYPE_NOT_ON_ROOT,
472912a2 6521 .file_offset = offsetof(struct mem_cgroup, events_file),
241994ed
JW
6522 .seq_show = memory_events_show,
6523 },
1e577f97
SB
6524 {
6525 .name = "events.local",
6526 .flags = CFTYPE_NOT_ON_ROOT,
6527 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6528 .seq_show = memory_events_local_show,
6529 },
587d9f72
JW
6530 {
6531 .name = "stat",
587d9f72
JW
6532 .seq_show = memory_stat_show,
6533 },
5f9a4f4a
MS
6534#ifdef CONFIG_NUMA
6535 {
6536 .name = "numa_stat",
6537 .seq_show = memory_numa_stat_show,
6538 },
6539#endif
3d8b38eb
RG
6540 {
6541 .name = "oom.group",
6542 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6543 .seq_show = memory_oom_group_show,
6544 .write = memory_oom_group_write,
6545 },
241994ed
JW
6546 { } /* terminate */
6547};
6548
073219e9 6549struct cgroup_subsys memory_cgrp_subsys = {
92fb9748 6550 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 6551 .css_online = mem_cgroup_css_online,
92fb9748 6552 .css_offline = mem_cgroup_css_offline,
6df38689 6553 .css_released = mem_cgroup_css_released,
92fb9748 6554 .css_free = mem_cgroup_css_free,
1ced953b 6555 .css_reset = mem_cgroup_css_reset,
7dc74be0
DN
6556 .can_attach = mem_cgroup_can_attach,
6557 .cancel_attach = mem_cgroup_cancel_attach,
264a0ae1 6558 .post_attach = mem_cgroup_move_task,
f00baae7 6559 .bind = mem_cgroup_bind,
241994ed
JW
6560 .dfl_cftypes = memory_files,
6561 .legacy_cftypes = mem_cgroup_legacy_files,
6d12e2d8 6562 .early_init = 0,
8cdea7c0 6563};
c077719b 6564
bc50bcc6
JW
6565/*
6566 * This function calculates an individual cgroup's effective
6567 * protection which is derived from its own memory.min/low, its
6568 * parent's and siblings' settings, as well as the actual memory
6569 * distribution in the tree.
6570 *
6571 * The following rules apply to the effective protection values:
6572 *
6573 * 1. At the first level of reclaim, effective protection is equal to
6574 * the declared protection in memory.min and memory.low.
6575 *
6576 * 2. To enable safe delegation of the protection configuration, at
6577 * subsequent levels the effective protection is capped to the
6578 * parent's effective protection.
6579 *
6580 * 3. To make complex and dynamic subtrees easier to configure, the
6581 * user is allowed to overcommit the declared protection at a given
6582 * level. If that is the case, the parent's effective protection is
6583 * distributed to the children in proportion to how much protection
6584 * they have declared and how much of it they are utilizing.
6585 *
6586 * This makes distribution proportional, but also work-conserving:
6587 * if one cgroup claims much more protection than the memory it uses,
6588 * the unused remainder is available to its siblings.
6589 *
6590 * 4. Conversely, when the declared protection is undercommitted at a
6591 * given level, the distribution of the larger parental protection
6592 * budget is NOT proportional. A cgroup's protection from a sibling
6593 * is capped to its own memory.min/low setting.
6594 *
8a931f80
JW
6595 * 5. However, to allow protecting recursive subtrees from each other
6596 * without having to declare each individual cgroup's fixed share
6597 * of the ancestor's claim to protection, any unutilized -
6598 * "floating" - protection from up the tree is distributed in
6599 * proportion to each cgroup's *usage*. This makes the protection
6600 * neutral wrt sibling cgroups and lets them compete freely over
6601 * the shared parental protection budget, but it protects the
6602 * subtree as a whole from neighboring subtrees.
6603 *
6604 * Note that 4. and 5. are not in conflict: 4. is about protecting
6605 * against immediate siblings whereas 5. is about protecting against
6606 * neighboring subtrees.
bc50bcc6
JW
6607 */
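/*
 * A worked example of the proportional case in rule 3 (illustrative
 * numbers only): a parent with an effective protection of 6G has two
 * children that both declare memory.low = 8G and currently use 6G and
 * 2G. Their utilized protection is min(usage, setting), i.e. 6G and 2G,
 * so siblings_protected is 8G and exceeds the parent's 6G. The children
 * therefore end up with 6G * 6G/8G = 4.5G and 6G * 2G/8G = 1.5G of
 * effective protection, which together consume exactly the parent's
 * budget.
 */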
6608static unsigned long effective_protection(unsigned long usage,
8a931f80 6609 unsigned long parent_usage,
bc50bcc6
JW
6610 unsigned long setting,
6611 unsigned long parent_effective,
6612 unsigned long siblings_protected)
6613{
6614 unsigned long protected;
8a931f80 6615 unsigned long ep;
bc50bcc6
JW
6616
6617 protected = min(usage, setting);
6618 /*
6619 * If all cgroups at this level combined claim and use more
6620 * protection than what the parent affords them, distribute
6621 * shares in proportion to utilization.
6622 *
6623 * We are using actual utilization rather than the statically
6624 * claimed protection in order to be work-conserving: claimed
6625 * but unused protection is available to siblings that would
6626 * otherwise get a smaller chunk than what they claimed.
6627 */
6628 if (siblings_protected > parent_effective)
6629 return protected * parent_effective / siblings_protected;
6630
6631 /*
6632 * Ok, utilized protection of all children is within what the
6633 * parent affords them, so we know whatever this child claims
6634 * and utilizes is effectively protected.
6635 *
6636 * If there is unprotected usage beyond this value, reclaim
6637 * will apply pressure in proportion to that amount.
6638 *
6639 * If there is unutilized protection, the cgroup will be fully
6640 * shielded from reclaim, but we do return a smaller value for
6641 * protection than what the group could enjoy in theory. This
6642 * is okay. With the overcommit distribution above, effective
6643 * protection is always dependent on how memory is actually
6644 * consumed among the siblings anyway.
6645 */
8a931f80
JW
6646 ep = protected;
6647
6648 /*
6649 * If the children aren't claiming (all of) the protection
6650 * afforded to them by the parent, distribute the remainder in
6651 * proportion to the (unprotected) memory of each cgroup. That
6652 * way, cgroups that aren't explicitly prioritized wrt each
6653 * other compete freely over the allowance, but they are
6654 * collectively protected from neighboring trees.
6655 *
6656 * We're using unprotected memory for the weight so that if
6657 * some cgroups DO claim explicit protection, we don't protect
6658 * the same bytes twice.
cd324edc
JW
6659 *
6660 * Check both usage and parent_usage against the respective
6661 * protected values. One should imply the other, but they
6662 * aren't read atomically - make sure the division is sane.
8a931f80
JW
6663 */
6664 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6665 return ep;
cd324edc
JW
6666 if (parent_effective > siblings_protected &&
6667 parent_usage > siblings_protected &&
6668 usage > protected) {
8a931f80
JW
6669 unsigned long unclaimed;
6670
6671 unclaimed = parent_effective - siblings_protected;
6672 unclaimed *= usage - protected;
6673 unclaimed /= parent_usage - siblings_protected;
6674
6675 ep += unclaimed;
6676 }
6677
6678 return ep;
bc50bcc6
JW
6679}
6680
241994ed 6681/**
bf8d5d52 6682 * mem_cgroup_protected - check if memory consumption is in the normal range
34c81057 6683 * @root: the top ancestor of the sub-tree being checked
241994ed
JW
6684 * @memcg: the memory cgroup to check
6685 *
23067153
RG
6686 * WARNING: This function is not stateless! It can only be used as part
6687 * of a top-down tree iteration, not for isolated queries.
241994ed 6688 */
45c7f7e1
CD
6689void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6690 struct mem_cgroup *memcg)
241994ed 6691{
8a931f80 6692 unsigned long usage, parent_usage;
23067153
RG
6693 struct mem_cgroup *parent;
6694
241994ed 6695 if (mem_cgroup_disabled())
45c7f7e1 6696 return;
241994ed 6697
34c81057
SC
6698 if (!root)
6699 root = root_mem_cgroup;
22f7496f
YS
6700
6701 /*
6702 * Effective values of the reclaim targets are ignored so they
6703 * can be stale. Have a look at mem_cgroup_protection for more
6704 * details.
6705 * TODO: calculation should be more robust so that we do not need
6706 * that special casing.
6707 */
34c81057 6708 if (memcg == root)
45c7f7e1 6709 return;
241994ed 6710
23067153 6711 usage = page_counter_read(&memcg->memory);
bf8d5d52 6712 if (!usage)
45c7f7e1 6713 return;
bf8d5d52 6714
bf8d5d52 6715 parent = parent_mem_cgroup(memcg);
df2a4196
RG
6716 /* No parent means a non-hierarchical mode on v1 memcg */
6717 if (!parent)
45c7f7e1 6718 return;
df2a4196 6719
bc50bcc6 6720 if (parent == root) {
c3d53200 6721 memcg->memory.emin = READ_ONCE(memcg->memory.min);
03960e33 6722 memcg->memory.elow = READ_ONCE(memcg->memory.low);
45c7f7e1 6723 return;
bf8d5d52
RG
6724 }
6725
8a931f80
JW
6726 parent_usage = page_counter_read(&parent->memory);
6727
b3a7822e 6728 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
c3d53200
CD
6729 READ_ONCE(memcg->memory.min),
6730 READ_ONCE(parent->memory.emin),
b3a7822e 6731 atomic_long_read(&parent->memory.children_min_usage)));
23067153 6732
b3a7822e 6733 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
03960e33
CD
6734 READ_ONCE(memcg->memory.low),
6735 READ_ONCE(parent->memory.elow),
b3a7822e 6736 atomic_long_read(&parent->memory.children_low_usage)));
241994ed
JW
6737}
6738
00501b53 6739/**
f0e45fb4 6740 * mem_cgroup_charge - charge a newly allocated page to a cgroup
00501b53
JW
6741 * @page: page to charge
6742 * @mm: mm context of the victim
6743 * @gfp_mask: reclaim mode
00501b53
JW
6744 *
6745 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6746 * pages according to @gfp_mask if necessary.
6747 *
f0e45fb4 6748 * Returns 0 on success. Otherwise, an error code is returned.
00501b53 6749 */
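/*
 * Typical use (illustrative): a fault handler charges a freshly
 * allocated page before mapping it, e.g.
 *
 *	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
 *		goto oom;
 *
 * and the charge is dropped again through mem_cgroup_uncharge() once
 * the page is freed.
 */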
d9eb1ea2 6750int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
00501b53 6751{
6c357848 6752 unsigned int nr_pages = thp_nr_pages(page);
00501b53 6753 struct mem_cgroup *memcg = NULL;
00501b53
JW
6754 int ret = 0;
6755
6756 if (mem_cgroup_disabled())
6757 goto out;
6758
6759 if (PageSwapCache(page)) {
2d1c4980
JW
6760 swp_entry_t ent = { .val = page_private(page), };
6761 unsigned short id;
6762
00501b53
JW
6763 /*
6764 * Every swap fault against a single page tries to charge the
6765 * page, bail as early as possible. shmem_unuse() encounters
eccb52e7
JW
6766 * already charged pages, too. page->mem_cgroup is protected
6767 * by the page lock, which serializes swap cache removal, which
00501b53
JW
6768 * in turn serializes uncharging.
6769 */
e993d905 6770 VM_BUG_ON_PAGE(!PageLocked(page), page);
abe2895b 6771 if (compound_head(page)->mem_cgroup)
00501b53 6772 goto out;
e993d905 6773
2d1c4980
JW
6774 id = lookup_swap_cgroup_id(ent);
6775 rcu_read_lock();
6776 memcg = mem_cgroup_from_id(id);
6777 if (memcg && !css_tryget_online(&memcg->css))
6778 memcg = NULL;
6779 rcu_read_unlock();
00501b53
JW
6780 }
6781
00501b53
JW
6782 if (!memcg)
6783 memcg = get_mem_cgroup_from_mm(mm);
6784
6785 ret = try_charge(memcg, gfp_mask, nr_pages);
f0e45fb4
JW
6786 if (ret)
6787 goto out_put;
00501b53 6788
1a3e1f40 6789 css_get(&memcg->css);
d9eb1ea2 6790 commit_charge(page, memcg);
6abb5a86 6791
6abb5a86 6792 local_irq_disable();
3fba69a5 6793 mem_cgroup_charge_statistics(memcg, page, nr_pages);
6abb5a86
JW
6794 memcg_check_events(memcg, page);
6795 local_irq_enable();
00501b53 6796
2d1c4980 6797 if (PageSwapCache(page)) {
00501b53
JW
6798 swp_entry_t entry = { .val = page_private(page) };
6799 /*
6800 * The swap entry might not get freed for a long time,
6801 * let's not wait for it. The page already received a
6802 * memory+swap charge, drop the swap entry duplicate.
6803 */
38d8b4e6 6804 mem_cgroup_uncharge_swap(entry, nr_pages);
00501b53 6805 }
00501b53 6806
f0e45fb4
JW
6807out_put:
6808 css_put(&memcg->css);
6809out:
6810 return ret;
3fea5a49
JW
6811}
6812
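/*
 * Illustrative sketch, not part of this file: the expected calling pattern
 * for a newly allocated page, modelled loosely on the fault path.  The
 * function below is hypothetical; mem_cgroup_charge() and
 * mem_cgroup_uncharge() are the interfaces defined in this file.
 */
static int example_charge_new_page(struct page *page, struct mm_struct *mm)
{
	int err;

	/* charge before the page becomes reachable through the page tables */
	err = mem_cgroup_charge(page, mm, GFP_KERNEL);
	if (err)
		return err;

	/* ... map the page; if that fails, undo with mem_cgroup_uncharge(page) */
	return 0;
}
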
a9d5adee
JG
6813struct uncharge_gather {
6814 struct mem_cgroup *memcg;
9f762dbe 6815 unsigned long nr_pages;
a9d5adee 6816 unsigned long pgpgout;
a9d5adee 6817 unsigned long nr_kmem;
a9d5adee
JG
6818 struct page *dummy_page;
6819};
6820
6821static inline void uncharge_gather_clear(struct uncharge_gather *ug)
747db954 6822{
a9d5adee
JG
6823 memset(ug, 0, sizeof(*ug));
6824}
6825
6826static void uncharge_batch(const struct uncharge_gather *ug)
6827{
747db954
JW
6828 unsigned long flags;
6829
a9d5adee 6830 if (!mem_cgroup_is_root(ug->memcg)) {
9f762dbe 6831 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
7941d214 6832 if (do_memsw_account())
9f762dbe 6833 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
a9d5adee
JG
6834 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6835 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6836 memcg_oom_recover(ug->memcg);
ce00a967 6837 }
747db954
JW
6838
6839 local_irq_save(flags);
c9019e9b 6840 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
9f762dbe 6841 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
a9d5adee 6842 memcg_check_events(ug->memcg, ug->dummy_page);
747db954 6843 local_irq_restore(flags);
f1796544
MH
6844
6845 /* drop reference from uncharge_page */
6846 css_put(&ug->memcg->css);
a9d5adee
JG
6847}
6848
6849static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6850{
9f762dbe
JW
6851 unsigned long nr_pages;
6852
a9d5adee 6853 VM_BUG_ON_PAGE(PageLRU(page), page);
a9d5adee
JG
6854
6855 if (!page->mem_cgroup)
6856 return;
6857
6858 /*
6859 * Nobody should be changing or seriously looking at
6860 * page->mem_cgroup at this point, we have fully
6861 * exclusive access to the page.
6862 */
6863
6864 if (ug->memcg != page->mem_cgroup) {
6865 if (ug->memcg) {
6866 uncharge_batch(ug);
6867 uncharge_gather_clear(ug);
6868 }
6869 ug->memcg = page->mem_cgroup;
f1796544
MH
6870
6871 /* pairs with css_put in uncharge_batch */
6872 css_get(&ug->memcg->css);
a9d5adee
JG
6873 }
6874
9f762dbe
JW
6875 nr_pages = compound_nr(page);
6876 ug->nr_pages += nr_pages;
a9d5adee 6877
9f762dbe 6878 if (!PageKmemcg(page)) {
a9d5adee
JG
6879 ug->pgpgout++;
6880 } else {
9f762dbe 6881 ug->nr_kmem += nr_pages;
a9d5adee
JG
6882 __ClearPageKmemcg(page);
6883 }
6884
6885 ug->dummy_page = page;
6886 page->mem_cgroup = NULL;
1a3e1f40 6887 css_put(&ug->memcg->css);
747db954
JW
6888}
6889
6890static void uncharge_list(struct list_head *page_list)
6891{
a9d5adee 6892 struct uncharge_gather ug;
747db954 6893 struct list_head *next;
a9d5adee
JG
6894
6895 uncharge_gather_clear(&ug);
747db954 6896
8b592656
JW
6897 /*
6898 * Note that the list can be a single page->lru; hence the
6899 * do-while loop instead of a simple list_for_each_entry().
6900 */
747db954
JW
6901 next = page_list->next;
6902 do {
a9d5adee
JG
6903 struct page *page;
6904
747db954
JW
6905 page = list_entry(next, struct page, lru);
6906 next = page->lru.next;
6907
a9d5adee 6908 uncharge_page(page, &ug);
747db954
JW
6909 } while (next != page_list);
6910
a9d5adee
JG
6911 if (ug.memcg)
6912 uncharge_batch(&ug);
747db954
JW
6913}
6914
0a31bc97
JW
6915/**
6916 * mem_cgroup_uncharge - uncharge a page
6917 * @page: page to uncharge
6918 *
f0e45fb4 6919 * Uncharge a page previously charged with mem_cgroup_charge().
0a31bc97
JW
6920 */
6921void mem_cgroup_uncharge(struct page *page)
6922{
a9d5adee
JG
6923 struct uncharge_gather ug;
6924
0a31bc97
JW
6925 if (mem_cgroup_disabled())
6926 return;
6927
747db954 6928 /* Don't touch page->lru of any random page, pre-check: */
1306a85a 6929 if (!page->mem_cgroup)
0a31bc97
JW
6930 return;
6931
a9d5adee
JG
6932 uncharge_gather_clear(&ug);
6933 uncharge_page(page, &ug);
6934 uncharge_batch(&ug);
747db954 6935}
0a31bc97 6936
747db954
JW
6937/**
6938 * mem_cgroup_uncharge_list - uncharge a list of pages
6939 * @page_list: list of pages to uncharge
6940 *
6941 * Uncharge a list of pages previously charged with
f0e45fb4 6942 * mem_cgroup_charge().
747db954
JW
6943 */
6944void mem_cgroup_uncharge_list(struct list_head *page_list)
6945{
6946 if (mem_cgroup_disabled())
6947 return;
0a31bc97 6948
747db954
JW
6949 if (!list_empty(page_list))
6950 uncharge_list(page_list);
0a31bc97
JW
6951}
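
/*
 * Illustrative sketch, not part of this file: release-style callers gather
 * pages on a private list so that pages belonging to the same memcg are
 * uncharged with one batched counter update.  The list handling is a
 * simplified assumption; only mem_cgroup_uncharge_list() comes from this file.
 */
static void example_uncharge_pages(struct page **pages, int nr)
{
	LIST_HEAD(batch);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &batch);	/* pages already off the LRU */

	mem_cgroup_uncharge_list(&batch);
}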
6952
6953/**
6a93ca8f
JW
6954 * mem_cgroup_migrate - charge a page's replacement
6955 * @oldpage: currently circulating page
6956 * @newpage: replacement page
0a31bc97 6957 *
6a93ca8f
JW
6958 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6959 * be uncharged upon free.
0a31bc97
JW
6960 *
6961 * Both pages must be locked, @newpage->mapping must be set up.
6962 */
6a93ca8f 6963void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
0a31bc97 6964{
29833315 6965 struct mem_cgroup *memcg;
44b7a8d3 6966 unsigned int nr_pages;
d93c4130 6967 unsigned long flags;
0a31bc97
JW
6968
6969 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6970 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
0a31bc97 6971 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6abb5a86
JW
6972 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6973 newpage);
0a31bc97
JW
6974
6975 if (mem_cgroup_disabled())
6976 return;
6977
6978 /* Page cache replacement: new page already charged? */
1306a85a 6979 if (newpage->mem_cgroup)
0a31bc97
JW
6980 return;
6981
45637bab 6982 /* Swapcache readahead pages can get replaced before being charged */
1306a85a 6983 memcg = oldpage->mem_cgroup;
29833315 6984 if (!memcg)
0a31bc97
JW
6985 return;
6986
44b7a8d3 6987 /* Force-charge the new page. The old one will be freed soon */
6c357848 6988 nr_pages = thp_nr_pages(newpage);
44b7a8d3
JW
6989
6990 page_counter_charge(&memcg->memory, nr_pages);
6991 if (do_memsw_account())
6992 page_counter_charge(&memcg->memsw, nr_pages);
0a31bc97 6993
1a3e1f40 6994 css_get(&memcg->css);
d9eb1ea2 6995 commit_charge(newpage, memcg);
44b7a8d3 6996
d93c4130 6997 local_irq_save(flags);
3fba69a5 6998 mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
44b7a8d3 6999 memcg_check_events(memcg, newpage);
d93c4130 7000 local_irq_restore(flags);
0a31bc97
JW
7001}
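
/*
 * Illustrative sketch, not part of this file: a migration-style caller
 * charges the replacement page and lets the old page keep its charge until
 * it is freed, as documented above.  Locking and mapping setup are elided.
 */
static void example_replace_page(struct page *oldpage, struct page *newpage)
{
	/* caller holds both page locks and has set up newpage->mapping */
	mem_cgroup_migrate(oldpage, newpage);
	/* oldpage is uncharged automatically when its last reference drops */
}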
7002
ef12947c 7003DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
11092087
JW
7004EXPORT_SYMBOL(memcg_sockets_enabled_key);
7005
2d758073 7006void mem_cgroup_sk_alloc(struct sock *sk)
11092087
JW
7007{
7008 struct mem_cgroup *memcg;
7009
2d758073
JW
7010 if (!mem_cgroup_sockets_enabled)
7011 return;
7012
e876ecc6
SB
7013 /* Do not associate the sock with unrelated interrupted task's memcg. */
7014 if (in_interrupt())
7015 return;
7016
11092087
JW
7017 rcu_read_lock();
7018 memcg = mem_cgroup_from_task(current);
f7e1cb6e
JW
7019 if (memcg == root_mem_cgroup)
7020 goto out;
0db15298 7021 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
f7e1cb6e 7022 goto out;
8965aa28 7023 if (css_tryget(&memcg->css))
11092087 7024 sk->sk_memcg = memcg;
f7e1cb6e 7025out:
11092087
JW
7026 rcu_read_unlock();
7027}
11092087 7028
2d758073 7029void mem_cgroup_sk_free(struct sock *sk)
11092087 7030{
2d758073
JW
7031 if (sk->sk_memcg)
7032 css_put(&sk->sk_memcg->css);
11092087
JW
7033}
7034
7035/**
7036 * mem_cgroup_charge_skmem - charge socket memory
7037 * @memcg: memcg to charge
7038 * @nr_pages: number of pages to charge
7039 *
7040 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7041 * @memcg's configured limit, %false if the charge had to be forced.
7042 */
7043bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7044{
f7e1cb6e 7045 gfp_t gfp_mask = GFP_KERNEL;
11092087 7046
f7e1cb6e 7047 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7048 struct page_counter *fail;
f7e1cb6e 7049
0db15298
JW
7050 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7051 memcg->tcpmem_pressure = 0;
f7e1cb6e
JW
7052 return true;
7053 }
0db15298
JW
7054 page_counter_charge(&memcg->tcpmem, nr_pages);
7055 memcg->tcpmem_pressure = 1;
f7e1cb6e 7056 return false;
11092087 7057 }
d886f4e4 7058
f7e1cb6e
JW
7059 /* Don't block in the packet receive path */
7060 if (in_softirq())
7061 gfp_mask = GFP_NOWAIT;
7062
c9019e9b 7063 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
b2807f07 7064
f7e1cb6e
JW
7065 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
7066 return true;
7067
7068 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
11092087
JW
7069 return false;
7070}
7071
7072/**
7073 * mem_cgroup_uncharge_skmem - uncharge socket memory
b7701a5f
MR
7074 * @memcg: memcg to uncharge
7075 * @nr_pages: number of pages to uncharge
11092087
JW
7076 */
7077void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7078{
f7e1cb6e 7079 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7080 page_counter_uncharge(&memcg->tcpmem, nr_pages);
f7e1cb6e
JW
7081 return;
7082 }
d886f4e4 7083
c9019e9b 7084 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
b2807f07 7085
475d0487 7086 refill_stock(memcg, nr_pages);
11092087
JW
7087}
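
/*
 * Illustrative sketch, not part of this file: how a networking-side caller
 * is expected to use the skmem interface.  The function name and the byte
 * rounding are assumptions for illustration; sk->sk_memcg is set up by
 * mem_cgroup_sk_alloc() above.
 */
static bool example_charge_socket_buffer(struct sock *sk, unsigned int bytes)
{
	unsigned int nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	if (!sk->sk_memcg)
		return true;	/* socket accounting not active for this sock */

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;	/* charge fit under the configured limit */

	/* the charge was forced; callers should treat this as memory pressure */
	return false;
}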
7088
f7e1cb6e
JW
7089static int __init cgroup_memory(char *s)
7090{
7091 char *token;
7092
7093 while ((token = strsep(&s, ",")) != NULL) {
7094 if (!*token)
7095 continue;
7096 if (!strcmp(token, "nosocket"))
7097 cgroup_memory_nosocket = true;
04823c83
VD
7098 if (!strcmp(token, "nokmem"))
7099 cgroup_memory_nokmem = true;
f7e1cb6e
JW
7100 }
7101 return 0;
7102}
7103__setup("cgroup.memory=", cgroup_memory);
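/*
 * Example (illustrative): disabling socket and kernel memory accounting
 * from the kernel command line, as parsed by cgroup_memory() above:
 *
 *	cgroup.memory=nosocket,nokmem
 */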
11092087 7104
2d11085e 7105/*
1081312f
MH
7106 * subsys_initcall() for memory controller.
7107 *
308167fc
SAS
7108 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7109 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7110 * basically everything that doesn't depend on a specific mem_cgroup structure
7111 * should be initialized from here.
2d11085e
MH
7112 */
7113static int __init mem_cgroup_init(void)
7114{
95a045f6
JW
7115 int cpu, node;
7116
308167fc
SAS
7117 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7118 memcg_hotplug_cpu_dead);
95a045f6
JW
7119
7120 for_each_possible_cpu(cpu)
7121 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7122 drain_local_stock);
7123
7124 for_each_node(node) {
7125 struct mem_cgroup_tree_per_node *rtpn;
95a045f6
JW
7126
7127 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7128 node_online(node) ? node : NUMA_NO_NODE);
7129
ef8f2327 7130 rtpn->rb_root = RB_ROOT;
fa90b2fd 7131 rtpn->rb_rightmost = NULL;
ef8f2327 7132 spin_lock_init(&rtpn->lock);
95a045f6
JW
7133 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7134 }
7135
2d11085e
MH
7136 return 0;
7137}
7138subsys_initcall(mem_cgroup_init);
21afa38e
JW
7139
7140#ifdef CONFIG_MEMCG_SWAP
358c07fc
AB
7141static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7142{
1c2d479a 7143 while (!refcount_inc_not_zero(&memcg->id.ref)) {
358c07fc
AB
7144 /*
7145 * The root cgroup cannot be destroyed, so its refcount must
7146 * always be >= 1.
7147 */
7148 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7149 VM_BUG_ON(1);
7150 break;
7151 }
7152 memcg = parent_mem_cgroup(memcg);
7153 if (!memcg)
7154 memcg = root_mem_cgroup;
7155 }
7156 return memcg;
7157}
7158
21afa38e
JW
7159/**
7160 * mem_cgroup_swapout - transfer a memsw charge to swap
7161 * @page: page whose memsw charge to transfer
7162 * @entry: swap entry to move the charge to
7163 *
7164 * Transfer the memsw charge of @page to @entry.
7165 */
7166void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7167{
1f47b61f 7168 struct mem_cgroup *memcg, *swap_memcg;
d6810d73 7169 unsigned int nr_entries;
21afa38e
JW
7170 unsigned short oldid;
7171
7172 VM_BUG_ON_PAGE(PageLRU(page), page);
7173 VM_BUG_ON_PAGE(page_count(page), page);
7174
2d1c4980 7175 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
21afa38e
JW
7176 return;
7177
7178 memcg = page->mem_cgroup;
7179
7180 /* Readahead page, never charged */
7181 if (!memcg)
7182 return;
7183
1f47b61f
VD
7184 /*
7185 * In case the memcg owning these pages has been offlined and doesn't
7186 * have an ID allocated to it anymore, charge the closest online
7187 * ancestor for the swap instead and transfer the memory+swap charge.
7188 */
7189 swap_memcg = mem_cgroup_id_get_online(memcg);
6c357848 7190 nr_entries = thp_nr_pages(page);
d6810d73
HY
7191 /* Get references for the tail pages, too */
7192 if (nr_entries > 1)
7193 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7194 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7195 nr_entries);
21afa38e 7196 VM_BUG_ON_PAGE(oldid, page);
c9019e9b 7197 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
21afa38e
JW
7198
7199 page->mem_cgroup = NULL;
7200
7201 if (!mem_cgroup_is_root(memcg))
d6810d73 7202 page_counter_uncharge(&memcg->memory, nr_entries);
21afa38e 7203
2d1c4980 7204 if (!cgroup_memory_noswap && memcg != swap_memcg) {
1f47b61f 7205 if (!mem_cgroup_is_root(swap_memcg))
d6810d73
HY
7206 page_counter_charge(&swap_memcg->memsw, nr_entries);
7207 page_counter_uncharge(&memcg->memsw, nr_entries);
1f47b61f
VD
7208 }
7209
ce9ce665
SAS
7210 /*
7211 * Interrupts should be disabled here because the caller holds the
b93b0163 7212 * i_pages lock which is taken with interrupts-off. It is
ce9ce665 7213 * important here to have the interrupts disabled because it is the
b93b0163 7214 * only synchronisation we have for updating the per-CPU variables.
ce9ce665
SAS
7215 */
7216 VM_BUG_ON(!irqs_disabled());
3fba69a5 7217 mem_cgroup_charge_statistics(memcg, page, -nr_entries);
21afa38e 7218 memcg_check_events(memcg, page);
73f576c0 7219
1a3e1f40 7220 css_put(&memcg->css);
21afa38e
JW
7221}
7222
38d8b4e6
HY
7223/**
7224 * mem_cgroup_try_charge_swap - try charging swap space for a page
37e84351
VD
7225 * @page: page being added to swap
7226 * @entry: swap entry to charge
7227 *
38d8b4e6 7228 * Try to charge @page's memcg for the swap space at @entry.
37e84351
VD
7229 *
7230 * Returns 0 on success, -ENOMEM on failure.
7231 */
7232int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7233{
6c357848 7234 unsigned int nr_pages = thp_nr_pages(page);
37e84351 7235 struct page_counter *counter;
38d8b4e6 7236 struct mem_cgroup *memcg;
37e84351
VD
7237 unsigned short oldid;
7238
2d1c4980 7239 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
37e84351
VD
7240 return 0;
7241
7242 memcg = page->mem_cgroup;
7243
7244 /* Readahead page, never charged */
7245 if (!memcg)
7246 return 0;
7247
f3a53a3a
TH
7248 if (!entry.val) {
7249 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
bb98f2c5 7250 return 0;
f3a53a3a 7251 }
bb98f2c5 7252
1f47b61f
VD
7253 memcg = mem_cgroup_id_get_online(memcg);
7254
2d1c4980 7255 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
38d8b4e6 7256 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
f3a53a3a
TH
7257 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7258 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
1f47b61f 7259 mem_cgroup_id_put(memcg);
37e84351 7260 return -ENOMEM;
1f47b61f 7261 }
37e84351 7262
38d8b4e6
HY
7263 /* Get references for the tail pages, too */
7264 if (nr_pages > 1)
7265 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7266 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
37e84351 7267 VM_BUG_ON_PAGE(oldid, page);
c9019e9b 7268 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
37e84351 7269
37e84351
VD
7270 return 0;
7271}
7272
21afa38e 7273/**
38d8b4e6 7274 * mem_cgroup_uncharge_swap - uncharge swap space
21afa38e 7275 * @entry: swap entry to uncharge
38d8b4e6 7276 * @nr_pages: the amount of swap space to uncharge
21afa38e 7277 */
38d8b4e6 7278void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
21afa38e
JW
7279{
7280 struct mem_cgroup *memcg;
7281 unsigned short id;
7282
38d8b4e6 7283 id = swap_cgroup_record(entry, 0, nr_pages);
21afa38e 7284 rcu_read_lock();
adbe427b 7285 memcg = mem_cgroup_from_id(id);
21afa38e 7286 if (memcg) {
2d1c4980 7287 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
37e84351 7288 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
38d8b4e6 7289 page_counter_uncharge(&memcg->swap, nr_pages);
37e84351 7290 else
38d8b4e6 7291 page_counter_uncharge(&memcg->memsw, nr_pages);
37e84351 7292 }
c9019e9b 7293 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
38d8b4e6 7294 mem_cgroup_id_put_many(memcg, nr_pages);
21afa38e
JW
7295 }
7296 rcu_read_unlock();
7297}
7298
d8b38438
VD
7299long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7300{
7301 long nr_swap_pages = get_nr_swap_pages();
7302
eccb52e7 7303 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
d8b38438
VD
7304 return nr_swap_pages;
7305 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7306 nr_swap_pages = min_t(long, nr_swap_pages,
bbec2e15 7307 READ_ONCE(memcg->swap.max) -
d8b38438
VD
7308 page_counter_read(&memcg->swap));
7309 return nr_swap_pages;
7310}
7311
5ccc5aba
VD
7312bool mem_cgroup_swap_full(struct page *page)
7313{
7314 struct mem_cgroup *memcg;
7315
7316 VM_BUG_ON_PAGE(!PageLocked(page), page);
7317
7318 if (vm_swap_full())
7319 return true;
eccb52e7 7320 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5ccc5aba
VD
7321 return false;
7322
7323 memcg = page->mem_cgroup;
7324 if (!memcg)
7325 return false;
7326
4b82ab4f
JK
7327 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7328 unsigned long usage = page_counter_read(&memcg->swap);
7329
7330 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7331 usage * 2 >= READ_ONCE(memcg->swap.max))
5ccc5aba 7332 return true;
4b82ab4f 7333 }
5ccc5aba
VD
7334
7335 return false;
7336}
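
/*
 * Worked example (illustrative): with memory.swap.max = 1G and swap.high
 * left at "max", a cgroup is reported as swap-full once its swap usage
 * reaches 512M, because usage * 2 >= max then holds for it (or for an
 * ancestor with a tighter limit).
 */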
7337
eccb52e7 7338static int __init setup_swap_account(char *s)
21afa38e
JW
7339{
7340 if (!strcmp(s, "1"))
eccb52e7 7341 cgroup_memory_noswap = 0;
21afa38e 7342 else if (!strcmp(s, "0"))
eccb52e7 7343 cgroup_memory_noswap = 1;
21afa38e
JW
7344 return 1;
7345}
eccb52e7 7346__setup("swapaccount=", setup_swap_account);
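/*
 * Example (illustrative): swap accounting can be toggled at boot, as parsed
 * by setup_swap_account() above:
 *
 *	swapaccount=0	disable the swap controller (cgroup_memory_noswap = 1)
 *	swapaccount=1	enable it                   (cgroup_memory_noswap = 0)
 */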
21afa38e 7347
37e84351
VD
7348static u64 swap_current_read(struct cgroup_subsys_state *css,
7349 struct cftype *cft)
7350{
7351 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7352
7353 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7354}
7355
4b82ab4f
JK
7356static int swap_high_show(struct seq_file *m, void *v)
7357{
7358 return seq_puts_memcg_tunable(m,
7359 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7360}
7361
7362static ssize_t swap_high_write(struct kernfs_open_file *of,
7363 char *buf, size_t nbytes, loff_t off)
7364{
7365 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7366 unsigned long high;
7367 int err;
7368
7369 buf = strstrip(buf);
7370 err = page_counter_memparse(buf, "max", &high);
7371 if (err)
7372 return err;
7373
7374 page_counter_set_high(&memcg->swap, high);
7375
7376 return nbytes;
7377}
7378
37e84351
VD
7379static int swap_max_show(struct seq_file *m, void *v)
7380{
677dc973
CD
7381 return seq_puts_memcg_tunable(m,
7382 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
37e84351
VD
7383}
7384
7385static ssize_t swap_max_write(struct kernfs_open_file *of,
7386 char *buf, size_t nbytes, loff_t off)
7387{
7388 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7389 unsigned long max;
7390 int err;
7391
7392 buf = strstrip(buf);
7393 err = page_counter_memparse(buf, "max", &max);
7394 if (err)
7395 return err;
7396
be09102b 7397 xchg(&memcg->swap.max, max);
37e84351
VD
7398
7399 return nbytes;
7400}
7401
f3a53a3a
TH
7402static int swap_events_show(struct seq_file *m, void *v)
7403{
aa9694bb 7404 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
f3a53a3a 7405
4b82ab4f
JK
7406 seq_printf(m, "high %lu\n",
7407 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
f3a53a3a
TH
7408 seq_printf(m, "max %lu\n",
7409 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7410 seq_printf(m, "fail %lu\n",
7411 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7412
7413 return 0;
7414}
7415
37e84351
VD
7416static struct cftype swap_files[] = {
7417 {
7418 .name = "swap.current",
7419 .flags = CFTYPE_NOT_ON_ROOT,
7420 .read_u64 = swap_current_read,
7421 },
4b82ab4f
JK
7422 {
7423 .name = "swap.high",
7424 .flags = CFTYPE_NOT_ON_ROOT,
7425 .seq_show = swap_high_show,
7426 .write = swap_high_write,
7427 },
37e84351
VD
7428 {
7429 .name = "swap.max",
7430 .flags = CFTYPE_NOT_ON_ROOT,
7431 .seq_show = swap_max_show,
7432 .write = swap_max_write,
7433 },
f3a53a3a
TH
7434 {
7435 .name = "swap.events",
7436 .flags = CFTYPE_NOT_ON_ROOT,
7437 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7438 .seq_show = swap_events_show,
7439 },
37e84351
VD
7440 { } /* terminate */
7441};
7442
eccb52e7 7443static struct cftype memsw_files[] = {
21afa38e
JW
7444 {
7445 .name = "memsw.usage_in_bytes",
7446 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7447 .read_u64 = mem_cgroup_read_u64,
7448 },
7449 {
7450 .name = "memsw.max_usage_in_bytes",
7451 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7452 .write = mem_cgroup_reset,
7453 .read_u64 = mem_cgroup_read_u64,
7454 },
7455 {
7456 .name = "memsw.limit_in_bytes",
7457 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7458 .write = mem_cgroup_write,
7459 .read_u64 = mem_cgroup_read_u64,
7460 },
7461 {
7462 .name = "memsw.failcnt",
7463 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7464 .write = mem_cgroup_reset,
7465 .read_u64 = mem_cgroup_read_u64,
7466 },
7467 { }, /* terminate */
7468};
7469
82ff165c
BS
7470/*
7471 * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
7472 * instead of a core_initcall(), this could mean cgroup_memory_noswap still
7473 * remains set to false even when memcg is disabled via "cgroup_disable=memory"
7474 * boot parameter. This may result in a premature OOPS inside the
7475 * mem_cgroup_get_nr_swap_pages() function in corner cases.
7476 */
21afa38e
JW
7477static int __init mem_cgroup_swap_init(void)
7478{
2d1c4980
JW
7479 /* No memory control -> no swap control */
7480 if (mem_cgroup_disabled())
7481 cgroup_memory_noswap = true;
7482
7483 if (cgroup_memory_noswap)
eccb52e7
JW
7484 return 0;
7485
7486 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7487 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7488
21afa38e
JW
7489 return 0;
7490}
82ff165c 7491core_initcall(mem_cgroup_swap_init);
21afa38e
JW
7492
7493#endif /* CONFIG_MEMCG_SWAP */