/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
        MEMCG_NR_STAT,
};

enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_SWAP_HIGH,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT     16
#define MEM_CGROUP_ID_MAX       USHRT_MAX

struct mem_cgroup_id {
        int id;
        refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout; with
 * THP it is incremented by the number of pages involved. The counter is
 * used to trigger periodic events, which is simpler and more robust than
 * using jiffies etc. for that purpose.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
        /* Local (CPU and cgroup) page state & events */
        long state[MEMCG_NR_STAT];
        unsigned long events[NR_VM_EVENT_ITEMS];

        /* Delta calculation for lockless upward propagation */
        long state_prev[MEMCG_NR_STAT];
        unsigned long events_prev[NR_VM_EVENT_ITEMS];

        /* Cgroup1: threshold notifications & softlimit tree updates */
        unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
        /* Aggregated (CPU and subtree) page state & events */
        long state[MEMCG_NR_STAT];
        unsigned long events[NR_VM_EVENT_ITEMS];

        /* Pending child counts during tree propagation */
        long state_pending[MEMCG_NR_STAT];
        unsigned long events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

struct lruvec_stat {
        long count[NR_VM_NODE_STAT_ITEMS];
};

struct batched_lruvec_stat {
        s32 count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
        struct rcu_head rcu;
        atomic_long_t *nr_deferred;
        unsigned long *map;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec lruvec;

        /*
         * Legacy local VM stats. This must remain struct lruvec_stat and
         * cannot be optimized to struct batched_lruvec_stat: the per-cpu
         * deltas in lruvec_stat_cpu are bounded by MEMCG_CHARGE_BATCH *
         * PAGE_SIZE and therefore fit into an s32, whereas this field has
         * no upper limit.
         */
        struct lruvec_stat __percpu *lruvec_stat_local;

        /* Subtree VM stats (batched updates) */
        struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
        atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter iter;

        struct shrinker_info __rcu *shrinker_info;

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which */
                                        /* the soft limit is exceeded */
        bool on_tree;
        struct mem_cgroup *memcg;       /* Back pointer, we cannot */
                                        /* use container_of */
};

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
        KMEM_NONE,
        KMEM_ALLOCATED,
        KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)     struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT      4

struct memcg_cgwb_frn {
        u64 bdi_id;                     /* bdi->id of the foreign inode */
        int memcg_id;                   /* memcg->css.id of foreign inode */
        u64 at;                         /* jiffies_64 at the time of dirtying */
        struct wb_completion done;      /* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
        struct percpu_ref refcnt;
        struct mem_cgroup *memcg;
        atomic_t nr_charged_bytes;
        union {
                struct list_head list;
                struct rcu_head rcu;
        };
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;             /* Both v1 & v2 */

        union {
                struct page_counter swap;       /* v2 only */
                struct page_counter memsw;      /* v1 only */
        };

        /* Legacy consumer-oriented counters */
        struct page_counter kmem;               /* v1 only */
        struct page_counter tcpmem;             /* v1 only */

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the OOM killer kill all tasks belonging to this cgroup
         * when it has to kill one of them?
         */
        bool oom_group;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        int swappiness;
        /* OOM-Killer disable */
        int oom_kill_disable;

        /* memory.events and memory.events.local */
        struct cgroup_file events_file;
        struct cgroup_file events_local_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when the task is moved into
         * this mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /* taken only while moving_account > 0 */
        spinlock_t move_lock;
        unsigned long move_lock_flags;

        MEMCG_PADDING(_pad1_);

        /* memory.stat */
        struct memcg_vmstats vmstats;

        /* memory.events */
        atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
        atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

        unsigned long socket_pressure;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
        int kmemcg_id;
        enum memcg_kmem_state kmem_state;
        struct obj_cgroup __rcu *objcg;
        struct list_head objcg_list;    /* list of inherited objcgs */
#endif

        MEMCG_PADDING(_pad2_);

        /*
         * set > 0 if pages under this cgroup are moving to other cgroup.
         */
        atomic_t moving_account;
        struct task_struct *move_lock_task;

        struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
        struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct deferred_split deferred_split_queue;
#endif

        struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: larger batch sizes may be needed on very large machines.
 */
#define MEMCG_CHARGE_BATCH 32U

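/*
 * Illustrative sketch only, adapted from try_charge() in mm/memcontrol.c:
 * the charge path rounds each request up to MEMCG_CHARGE_BATCH pages
 * against the page_counter and parks the surplus in a per-cpu stock, so
 * most charges never touch the shared counters:
 *
 *      if (consume_stock(memcg, nr_pages))             // per-cpu fast path
 *              return 0;
 *      ...
 *      page_counter_try_charge(&memcg->memory, batch, &counter);
 *      ...
 *      if (batch > nr_pages)
 *              refill_stock(memcg, batch - nr_pages);  // keep the surplus
 */
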
extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
        /* page->memcg_data is a pointer to an objcgs vector */
        MEMCG_DATA_OBJCGS = (1UL << 0),
        /* page has been accounted as a non-slab kernel page */
        MEMCG_DATA_KMEM = (1UL << 1),
        /* the next bit after the last actual flag */
        __NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool PageMemcgKmem(struct page *page);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
        return READ_ONCE(objcg->memcg);
}

/*
 * __page_memcg - get the memory cgroup associated with a non-kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of pages, e.g. slab pages, ex-slab pages or
 * kmem pages.
 */
static inline struct mem_cgroup *__page_memcg(struct page *page)
{
        unsigned long memcg_data = page->memcg_data;

        VM_BUG_ON_PAGE(PageSlab(page), page);
        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __page_objcg - get the object cgroup associated with a kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of pages, e.g. slab pages, ex-slab pages or
 * LRU pages.
 */
static inline struct obj_cgroup *__page_objcg(struct page *page)
{
        unsigned long memcg_data = page->memcg_data;

        VM_BUG_ON_PAGE(PageSlab(page), page);
        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
        VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page);

        return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of pages, e.g. slab pages or ex-slab pages.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg(struct page *page)
{
        if (PageMemcgKmem(page))
                return obj_cgroup_memcg(__page_objcg(page));
        else
                return __page_memcg(page);
}

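/*
 * Illustrative sketch (not an API in this header): pinning the memcg of
 * an LRU page requires stabilizing the page<->memcg binding first, e.g.
 * with the page lock:
 *
 *      lock_page(page);
 *      memcg = page_memcg(page);
 *      if (memcg)
 *              css_get(&memcg->css);   // keep the memcg alive past unlock
 *      unlock_page(page);
 */
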
/*
 * page_memcg_rcu - locklessly get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of pages, e.g. slab pages or ex-slab pages.
 */
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
        unsigned long memcg_data = READ_ONCE(page->memcg_data);

        VM_BUG_ON_PAGE(PageSlab(page), page);
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. Unlike page_memcg(), this function can take any page as an
 * argument. It should be used when it is not known whether the page has
 * an associated memory cgroup pointer, an object cgroups vector, or an
 * object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        /*
         * Because page->memcg_data might be changed asynchronously
         * for slab pages, READ_ONCE() should be used here.
         */
        unsigned long memcg_data = READ_ONCE(page->memcg_data);

        if (memcg_data & MEMCG_DATA_OBJCGS)
                return NULL;

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * PageMemcgKmem - check if the page has the MemcgKmem flag set
 * @page: a pointer to the page struct
 *
 * Checks if the page has the MemcgKmem flag set. The caller must ensure
 * that the page has an associated memory cgroup. It's not safe to call
 * this function against some types of pages, e.g. slab pages.
 */
static inline bool PageMemcgKmem(struct page *page)
{
        VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
        return page->memcg_data & MEMCG_DATA_KMEM;
}

/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages which might have an associated memory cgroup instead,
 * e.g. kernel stack pages.
 */
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
        unsigned long memcg_data = READ_ONCE(page->memcg_data);

        VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

        return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function is safe to use even if the page can be directly
 * associated with a memory cgroup.
 */
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
        unsigned long memcg_data = READ_ONCE(page->memcg_data);

        if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
                return NULL;

        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

        return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline bool PageMemcgKmem(struct page *page)
{
        return false;
}

static inline struct obj_cgroup **page_objcgs(struct page *page)
{
        return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
        return NULL;
}
#endif

static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
        if (idx == MEMCG_PERCPU_B)
                return true;
        return vmstat_item_in_bytes(idx);
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
                                                  struct mem_cgroup *memcg,
                                                  bool in_low_reclaim)
{
        if (mem_cgroup_disabled())
                return 0;

        /*
         * There is no reclaim protection applied to a targeted reclaim.
         * We special-case it here because the mem_cgroup_protected
         * calculation is not robust enough to keep the protection
         * invariant for effective values computed by parallel reclaimers
         * with different reclaim targets. This is especially a problem
         * for tail memcgs (as they have pages on LRU) which would want
         * effective values of 0 for targeted reclaim but a different
         * value for external reclaim.
         *
         * Example
         * Let's have global and A's reclaim in parallel:
         *  |
         *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
         *  |\
         *  | C (low = 1G, usage = 2.5G)
         *  B (low = 1G, usage = 0.5G)
         *
         * For the global reclaim
         *  A.elow = A.low
         *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
         *  C.elow = min(C.usage, C.low)
         *
         * With the effective values resetting we have A's reclaim
         *  A.elow = 0
         *  B.elow = B.low
         *  C.elow = C.low
         *
         * If the global reclaim races with A's reclaim then
         *  B.elow = C.elow = 0 (because children_low_usage > A.elow)
         * is possible, and reclaiming B would violate the protection.
         */
        if (root == memcg)
                return 0;

        if (in_low_reclaim)
                return READ_ONCE(memcg->memory.emin);

        return max(READ_ONCE(memcg->memory.emin),
                   READ_ONCE(memcg->memory.elow));
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
        /*
         * The root memcg doesn't account charges, and doesn't support
         * protection.
         */
        return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
        if (!mem_cgroup_supports_protection(memcg))
                return false;

        return READ_ONCE(memcg->memory.elow) >=
                page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
        if (!mem_cgroup_supports_protection(memcg))
                return false;

        return READ_ONCE(memcg->memory.emin) >=
                page_counter_read(&memcg->memory);
}

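/*
 * Illustrative sketch (adapted from shrink_node_memcgs() in mm/vmscan.c)
 * of how reclaim consumes this API: effective protection values are
 * calculated first, then protected memcgs are skipped:
 *
 *      mem_cgroup_calculate_protection(target_memcg, memcg);
 *      if (mem_cgroup_below_min(memcg)) {
 *              continue;                       // hard protection
 *      } else if (mem_cgroup_below_low(memcg)) {
 *              if (!sc->memcg_low_reclaim) {   // soft protection
 *                      sc->memcg_low_skipped = 1;
 *                      continue;
 *              }
 *              memcg_memory_event(memcg, MEMCG_LOW);
 *      }
 */
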
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

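/*
 * Illustrative sketch (roughly the anonymous fault path): a page is
 * charged right after allocation and implicitly uncharged when the last
 * reference is dropped:
 *
 *      page = alloc_page(GFP_HIGHUSER_MOVABLE);
 *      if (!page || mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
 *              if (page)
 *                      put_page(page);
 *              return VM_FAULT_OOM;    // charge failed against the limit
 *      }
 *      // ... map the page; mem_cgroup_uncharge() runs from the
 *      // page-free path once the final reference goes away.
 */
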
/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        if (!memcg)
                memcg = root_mem_cgroup;

        mz = memcg->nodeinfo[pgdat->node_id];
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 *
 * This function relies on the page's memcg binding (page->memcg_data)
 * being stable.
 */
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
        pg_data_t *pgdat = page_pgdat(page);
        struct mem_cgroup *memcg = page_memcg(page);

        VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
        return mem_cgroup_lruvec(memcg, pgdat);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
                                        unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
#else
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
        return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
        percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
                                       unsigned long nr)
{
        percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
        percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
        if (memcg)
                css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)        \
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
                          int (*)(struct task_struct *, void *), void *);

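/*
 * Illustrative sketch (the canonical iterator pattern documented in
 * mm/memcontrol.c): walk a subtree in pre-order; a partial walk must end
 * with mem_cgroup_iter_break() to drop the reference held on the current
 * position (should_stop() is a hypothetical predicate):
 *
 *      struct mem_cgroup *iter;
 *
 *      for (iter = mem_cgroup_iter(root, NULL, NULL);
 *           iter;
 *           iter = mem_cgroup_iter(root, iter, NULL)) {
 *              if (should_stop(iter)) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      }
 */
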
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        if (!memcg->memory.parent)
                return NULL;
        return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                                            struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
        WARN_ON(current->in_user_fault);
        current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
        WARN_ON(!current->in_user_fault);
        current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
                                            struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

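/*
 * Illustrative sketch (similar to the dirty accounting code in
 * mm/page-writeback.c): lock_page_memcg() brackets updates that need a
 * stable page<->memcg binding so the stats hit the right cgroup:
 *
 *      lock_page_memcg(page);
 *      if (TestClearPageDirty(page))
 *              dec_lruvec_page_state(page, NR_FILE_DIRTY);
 *      unlock_page_memcg(page);
 */
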
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x = 0;
        int cpu;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        for_each_possible_cpu(cpu)
                x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                         int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_kmem_state(p, idx, val);
        local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
                                          enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
        unsigned long flags;

        local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
        local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
                                          enum vm_event_item idx)
{
        struct mem_cgroup *memcg = page_memcg(page);

        if (memcg)
                count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
                                        enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                count_memcg_events(memcg, idx, 1);
        rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
        bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
                          event == MEMCG_SWAP_FAIL;

        atomic_long_inc(&memcg->memory_events_local[event]);
        if (!swap_event)
                cgroup_file_notify(&memcg->events_local_file);

        do {
                atomic_long_inc(&memcg->memory_events[event]);
                if (swap_event)
                        cgroup_file_notify(&memcg->swap_events_file);
                else
                        cgroup_file_notify(&memcg->events_file);

                if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                        break;
                if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
                        break;
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));
}

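/*
 * Illustrative sketch (adapted from the charge path in mm/memcontrol.c):
 * failures against a hard limit are reported through this helper so they
 * show up in "memory.events" all the way up the hierarchy:
 *
 *      if (!page_counter_try_charge(&memcg->memory, batch, &counter)) {
 *              ...
 *              memcg_memory_event(mem_over_limit, MEMCG_MAX);
 *              ...     // then try reclaim and eventually OOM
 *      }
 */
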
static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                memcg_memory_event(memcg, event);
        rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT     0
#define MEM_CGROUP_ID_MAX       0

static inline struct mem_cgroup *page_memcg(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        return NULL;
}

static inline bool PageMemcgKmem(struct page *page)
{
        return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
                                                  struct mem_cgroup *memcg,
                                                  bool in_low_reclaim)
{
        return 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                                   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
        return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
        return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
                                    gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
                        struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
        pg_data_t *pgdat = page_pgdat(page);

        return &pgdat->__lruvec;
}

static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *lock_page_lruvec(struct page *page)
{
        struct pglist_data *pgdat = page_pgdat(page);

        spin_lock(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
{
        struct pglist_data *pgdat = page_pgdat(page);

        spin_lock_irq(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
                unsigned long *flagsp)
{
        struct pglist_data *pgdat = page_pgdat(page);

        spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
        return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
        return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
        return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
        struct task_struct *victim, struct mem_cgroup *oom_domain)
{
        return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                                     int idx,
                                     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx,
                                   int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
                                            enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                           int val)
{
        struct page *page = virt_to_head_page(p);

        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                         int val)
{
        struct page *page = virt_to_head_page(p);

        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
                                        enum vm_event_item idx,
                                        unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
                                          int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
        struct mem_cgroup *memcg;

        memcg = lruvec_memcg(lruvec);
        if (!memcg)
                return NULL;
        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
        spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
        spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
                unsigned long flags)
{
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
        return lruvec_pgdat(lruvec) == page_pgdat(page) &&
               lruvec_memcg(lruvec) == page_memcg(page);
}

/* Don't lock again iff the page's lruvec is already locked */
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
                struct lruvec *locked_lruvec)
{
        if (locked_lruvec) {
                if (page_matches_lruvec(page, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irq(locked_lruvec);
        }

        return lock_page_lruvec_irq(page);
}

/* Don't lock again iff the page's lruvec is already locked */
static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
                struct lruvec *locked_lruvec, unsigned long *flags)
{
        if (locked_lruvec) {
                if (page_matches_lruvec(page, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
        }

        return lock_page_lruvec_irqsave(page, flags);
}

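/*
 * Illustrative sketch (the pattern used by release_pages() in mm/swap.c):
 * when walking a page list, the relock helpers keep the lru_lock held
 * across consecutive pages that belong to the same lruvec:
 *
 *      struct lruvec *lruvec = NULL;
 *      unsigned long flags;
 *
 *      list_for_each_entry(page, list, lru) {
 *              lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 *              // ... operate on page under lruvec->lru_lock ...
 *      }
 *      if (lruvec)
 *              unlock_page_lruvec_irqrestore(lruvec, flags);
 */
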
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                         unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
                                             struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
{
        if (mem_cgroup_disabled())
                return;

        if (unlikely(&page_memcg(page)->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                                       unsigned long *pfilepages,
                                       unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
                             gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
        do {
                if (time_before(jiffies, memcg->socket_pressure))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}

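/*
 * Illustrative sketch (adapted from the socket memory accounting in
 * net/core/sock.c): protocols charge socket buffer pages against the
 * memcg and back off when it is under pressure:
 *
 *      if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *          !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *              goto suppress_allocation;       // charge refused
 *      ...
 *      if (mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *              sk_stream_moderate_sndbuf(sk);  // shrink the send buffer
 */
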
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
                                    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
        return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        if (memcg_kmem_enabled())
                return __memcg_kmem_charge_page(page, gfp, order);
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_page(page, order);
}

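/*
 * Illustrative sketch (roughly what the page allocator does): kernel
 * allocations are accounted only when __GFP_ACCOUNT is set, in which
 * case the allocator charges the freshly allocated page:
 *
 *      if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *          unlikely(__memcg_kmem_charge_page(page, gfp, order))) {
 *              __free_pages(page, order);      // charge failed
 *              page = NULL;
 *      }
 */
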
/*
 * A helper for accessing a memcg's kmemcg_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
        return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                           int order)
{
        return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
        return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */