/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_NR_STAT,
};
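/*
 * Illustrative note (not from the original header): because the memcg-only
 * items start at NR_VM_NODE_STAT_ITEMS, a single "idx" namespace can address
 * both node and memcg counters; e.g. a hypothetical caller could pass either
 * an enum node_stat_item value or MEMCG_SOCK to the same helper:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 */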

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};
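/*
 * Usage sketch (illustrative only): reclaim walks the memcg hierarchy with
 * mem_cgroup_iter(), passing this cookie so that successive scans resume
 * where the previous round left off, roughly:
 *
 *	struct mem_cgroup_reclaim_cookie reclaim = { .pgdat = pgdat };
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *
 *	while (memcg) {
 *		shrink_one_memcg(memcg);	// hypothetical callee
 *		memcg = mem_cgroup_iter(root, memcg, &reclaim);
 *	}
 */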

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_VM_EVENT_ITEMS];
};
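/*
 * Illustrative sketch (not from the original header): a stats flush computes
 * a per-CPU delta against the previous snapshot and propagates it up the
 * hierarchy, roughly:
 *
 *	delta = percpu->state[i] - percpu->state_prev[i];
 *	percpu->state_prev[i] = percpu->state[i];
 *	for (; memcg; memcg = parent_mem_cgroup(memcg))
 *		memcg->vmstats.state_pending[i] += delta;	// sketch only
 */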

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
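/*
 * Illustrative example (not from the original header): with thresholds
 * {1G, 2G, 4G} and usage at 2.5G, current_threshold indexes the 2G entry;
 * crossing 4G advances the index, and dropping below 2G moves it back,
 * firing the corresponding eventfds on each crossing.
 */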

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};
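/*
 * Illustrative note (not from the original header): a charged object's
 * accounting follows its objcg rather than the memcg directly, so when a
 * cgroup dies, repointing objcg->memcg at the parent reparents every
 * charged byte in one step instead of walking all live objects:
 *
 *	xchg(&objcg->memcg, parent);	// conceptual sketch only
 */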

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
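/*
 * Illustrative sketch (not from the original header): page->memcg_data
 * packs a pointer together with the two low flag bits, so decoding is
 *
 *	ptr   = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 *	flags = memcg_data & MEMCG_DATA_FLAGS_MASK;
 *
 * which works because the pointed-to objects are at least 4-byte aligned,
 * leaving the low bits of the pointer free for flags.
 */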

static inline bool PageMemcgKmem(struct page *page);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __page_memcg - get the memory cgroup associated with a non-kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages or
 * kmem pages.
 */
static inline struct mem_cgroup *__page_memcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __page_objcg - get the object cgroup associated with a kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages or
 * LRU pages.
 */
static inline struct obj_cgroup *__page_objcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
	VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	if (PageMemcgKmem(page))
		return obj_cgroup_memcg(__page_objcg(page));
	else
		return __page_memcg(page);
}
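/*
 * Usage sketch (illustrative only): a caller holding the page lock on a
 * non-kmem page already has a stable binding, so the plain accessor is safe:
 *
 *	lock_page(page);
 *	memcg = page_memcg(page);	// stable while the lock is held
 *	...
 *	unlock_page(page);
 *
 * Lockless readers should use page_memcg_rcu() under rcu_read_lock() instead.
 */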

/*
 * page_memcg_rcu - locklessly get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 */
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(PageSlab(page), page);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function unlike page_memcg() can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * PageMemcgKmem - check if the page has MemcgKmem flag set
 * @page: a pointer to the page struct
 *
 * Checks if the page has MemcgKmem flag set. The caller must ensure that
 * the page has an associated memory cgroup. It's not safe to call this
 * function against some types of pages, e.g. slab pages.
 */
static inline bool PageMemcgKmem(struct page *page)
{
	VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
	return page->memcg_data & MEMCG_DATA_KMEM;
}

/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages, which might have an associated memory cgroup: e.g.
 * kernel stack pages.
 */
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function is safe to use if the page can be directly
 * associated with a memory cgroup.
 */
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}
#endif

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 is possible because children_low_usage > A.elow,
	 * and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}
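/*
 * Usage sketch (illustrative only): reclaim reads the effective protection
 * values and scales its scan pressure accordingly, roughly:
 *
 *	unsigned long min, low;
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	mem_cgroup_protection(target_memcg, memcg, &min, &low);
 *	if (page_counter_read(&memcg->memory) <= min)
 *		continue;	// fully protected, skip this memcg
 */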

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask);
static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(page, mm, gfp_mask);
}
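/*
 * Usage sketch (illustrative only): a fault path charges a freshly
 * allocated page before mapping it, and releases it on failure:
 *
 *	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
 *		put_page(page);
 *		return VM_FAULT_OOM;
 *	}
 *	// on success the charge is dropped later via mem_cgroup_uncharge()
 */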

int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct page *page);
static inline void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(page);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 *
 * This function relies on page->mem_cgroup being stable.
 */
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct mem_cgroup *memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
	return mem_cgroup_lruvec(memcg, pgdat);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
					unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
#else
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}
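/*
 * Illustrative note (not from the original header): the double-underscore
 * variants assume the caller has already disabled interrupts; the plain
 * wrappers bracket them with local_irq_save()/local_irq_restore() so they
 * can be called from any context. The same convention repeats for the
 * lruvec and event helpers below.
 */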

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return READ_ONCE(memcg->vmstats.state[idx]);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
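/*
 * Illustrative note (not from the original header): on cgroup2, a MEMCG_OOM
 * raised in /a/b/c increments memory.events in c, b and a, so an ancestor
 * watching its own memory.events file sees OOMs anywhere in its subtree,
 * while memory.events.local (and the memory_localevents mount option)
 * only counts events hitting the cgroup itself.
 */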

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	return &pgdat->__lruvec;
}

static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *lock_page_lruvec(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */
78fb7466 | 1479 | |
da3ceeff | 1480 | static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) |
ec9f0238 | 1481 | { |
da3ceeff | 1482 | __mod_lruvec_kmem_state(p, idx, 1); |
ec9f0238 RG |
1483 | } |
1484 | ||
da3ceeff | 1485 | static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) |
ec9f0238 | 1486 | { |
da3ceeff | 1487 | __mod_lruvec_kmem_state(p, idx, -1); |
ec9f0238 RG |
1488 | } |
1489 | ||
7cf111bc JW |
1490 | static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) |
1491 | { | |
1492 | struct mem_cgroup *memcg; | |
1493 | ||
1494 | memcg = lruvec_memcg(lruvec); | |
1495 | if (!memcg) | |
1496 | return NULL; | |
1497 | memcg = parent_mem_cgroup(memcg); | |
1498 | if (!memcg) | |
1499 | return NULL; | |
1500 | return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); | |
1501 | } | |
1502 | ||
static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

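/*
 * Sketch of the expected pairing with the lock_page_lruvec*() variants
 * declared elsewhere in this header:
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = lock_page_lruvec_irqsave(page, &flags);
 *
 *	... operate on the page's LRU list ...
 *
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 */
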
/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
	       lruvec_memcg(lruvec) == page_memcg(page);
}

/* Don't lock again iff the page's lruvec is already locked */
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (page_matches_lruvec(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return lock_page_lruvec_irq(page);
}

/* Don't lock again iff the page's lruvec is already locked */
static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (page_matches_lruvec(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return lock_page_lruvec_irqsave(page, flags);
}

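/*
 * A minimal usage sketch: when walking a batch of pages that may span
 * several cgroups or nodes, the relock helpers only cycle the lock on a
 * lruvec boundary (page_list is a hypothetical list of pages):
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	struct page *page;
 *
 *	list_for_each_entry(page, page_list, lru) {
 *		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 *		... operate on the page under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */
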
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

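/*
 * Rough sketch of the intended call pattern (the callers shown are
 * illustrative, not verbatim): the dirtying path calls
 * mem_cgroup_track_foreign_dirty(page, wb) so that pages dirtied against
 * a wb that does not match their memcg are recorded, and the writeback
 * path calls mem_cgroup_flush_foreign(wb) so those foreign pages can
 * eventually be handed back to a matching wb.
 */
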
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
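/*
 * Sketch of the expected charge/uncharge pairing for socket buffer
 * memory (the caller shown is hypothetical; nr_pages is in pages):
 *
 *	if (!mem_cgroup_charge_skmem(memcg, nr_pages, gfp_mask))
 *		... over limit: enter socket memory pressure handling ...
 *	...
 *	mem_cgroup_uncharge_skmem(memcg, nr_pages);
 */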
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

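/*
 * Sketch of a typical caller, in the spirit of the networking code's
 * sk_under_memory_pressure() (sk is hypothetical here):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		... treat the socket as under memory pressure ...
 */
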
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

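/*
 * Illustrative sketch, assuming a slab-like caller that charges a
 * sub-page object against the current task's obj_cgroup:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && obj_cgroup_charge(objcg, gfp, size))
 *		... charge failed: back off ...
 *	...
 *	obj_cgroup_uncharge(objcg, size);	(when the object is freed)
 */
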
extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

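/*
 * Example sketch, per the comment above (the per-index cache lookup is
 * implementation-specific and elided); slab_mutex must be held:
 *
 *	for_each_memcg_cache_index(i) {
 *		... look up the cache at index i; it may be NULL ...
 *	}
 */
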
static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

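/*
 * Sketch of the pairing relied upon for accounted (__GFP_ACCOUNT) page
 * allocations; the surrounding allocator code is elided:
 *
 *	if (memcg_kmem_charge_page(page, gfp, order))
 *		... charge failed: undo the allocation ...
 *	...
 *	memcg_kmem_uncharge_page(page, order);	(on the free path)
 */
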
/*
 * A helper for accessing a memcg's kmem_id; it is used to look up the
 * corresponding per-memcg LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

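/* With kmem accounting disabled there are no memcg caches to visit. */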
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */