Commit | Line | Data |
---|---|---|
c942fddf | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
8cdea7c0 BS |
2 | /* memcontrol.h - Memory Controller |
3 | * | |
4 | * Copyright IBM Corporation, 2007 | |
5 | * Author Balbir Singh <balbir@linux.vnet.ibm.com> | |
6 | * | |
78fb7466 PE |
7 | * Copyright 2007 OpenVZ SWsoft Inc |
8 | * Author: Pavel Emelianov <xemul@openvz.org> | |
8cdea7c0 BS |
9 | */ |
10 | ||
11 | #ifndef _LINUX_MEMCONTROL_H | |
12 | #define _LINUX_MEMCONTROL_H | |
f8d66542 | 13 | #include <linux/cgroup.h> |
456f998e | 14 | #include <linux/vm_event_item.h> |
7ae1e1d0 | 15 | #include <linux/hardirq.h> |
a8964b9b | 16 | #include <linux/jump_label.h> |
33398cf2 MH |
17 | #include <linux/page_counter.h> |
18 | #include <linux/vmpressure.h> | |
19 | #include <linux/eventfd.h> | |
00f3ca2c JW |
20 | #include <linux/mm.h> |
21 | #include <linux/vmstat.h> | |
33398cf2 | 22 | #include <linux/writeback.h> |
fdf1cdb9 | 23 | #include <linux/page-flags.h> |
456f998e | 24 | |
78fb7466 | 25 | struct mem_cgroup; |
bf4f0599 | 26 | struct obj_cgroup; |
8697d331 BS |
27 | struct page; |
28 | struct mm_struct; | |
2633d7a0 | 29 | struct kmem_cache; |
78fb7466 | 30 | |
71cd3113 JW |
31 | /* Cgroup-specific page state, on top of universal node page state */ |
32 | enum memcg_stat_item { | |
468c3982 | 33 | MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, |
71cd3113 | 34 | MEMCG_SOCK, |
772616b0 | 35 | MEMCG_PERCPU_B, |
b2807f07 | 36 | MEMCG_NR_STAT, |
2a7106f2 GT |
37 | }; |
38 | ||
e27be240 JW |
39 | enum memcg_memory_event { |
40 | MEMCG_LOW, | |
71cd3113 JW |
41 | MEMCG_HIGH, |
42 | MEMCG_MAX, | |
43 | MEMCG_OOM, | |
fe6bdfc8 | 44 | MEMCG_OOM_KILL, |
4b82ab4f | 45 | MEMCG_SWAP_HIGH, |
f3a53a3a TH |
46 | MEMCG_SWAP_MAX, |
47 | MEMCG_SWAP_FAIL, | |
e27be240 | 48 | MEMCG_NR_MEMORY_EVENTS, |
71cd3113 JW |
49 | }; |
50 | ||
5660048c | 51 | struct mem_cgroup_reclaim_cookie { |
ef8f2327 | 52 | pg_data_t *pgdat; |
5660048c JW |
53 | unsigned int generation; |
54 | }; | |
55 | ||
71cd3113 JW |
56 | #ifdef CONFIG_MEMCG |
57 | ||
58 | #define MEM_CGROUP_ID_SHIFT 16 | |
59 | #define MEM_CGROUP_ID_MAX USHRT_MAX | |
60 | ||
61 | struct mem_cgroup_id { | |
62 | int id; | |
1c2d479a | 63 | refcount_t ref; |
71cd3113 JW |
64 | }; |
65 | ||
33398cf2 MH |
66 | /* |
67 | * Per memcg event counter is incremented at every pagein/pageout. With THP, | |
0845f831 RD |
68 | * it will be incremented by the number of pages. This counter is used |
69 | * to trigger some periodic events. This is straightforward and better | |
33398cf2 MH |
70 | * than using jiffies etc. to handle periodic memcg events. |
71 | */ | |
72 | enum mem_cgroup_events_target { | |
73 | MEM_CGROUP_TARGET_THRESH, | |
74 | MEM_CGROUP_TARGET_SOFTLIMIT, | |
33398cf2 MH |
75 | MEM_CGROUP_NTARGETS, |
76 | }; | |
77 | ||
871789d4 | 78 | struct memcg_vmstats_percpu { |
2d146aa3 JW |
79 | /* Local (CPU and cgroup) page state & events */ |
80 | long state[MEMCG_NR_STAT]; | |
81 | unsigned long events[NR_VM_EVENT_ITEMS]; | |
82 | ||
83 | /* Delta calculation for lockless upward propagation */ | |
84 | long state_prev[MEMCG_NR_STAT]; | |
85 | unsigned long events_prev[NR_VM_EVENT_ITEMS]; | |
86 | ||
87 | /* Cgroup1: threshold notifications & softlimit tree updates */ | |
88 | unsigned long nr_page_events; | |
89 | unsigned long targets[MEM_CGROUP_NTARGETS]; | |
90 | }; | |
91 | ||
92 | struct memcg_vmstats { | |
93 | /* Aggregated (CPU and subtree) page state & events */ | |
94 | long state[MEMCG_NR_STAT]; | |
95 | unsigned long events[NR_VM_EVENT_ITEMS]; | |
96 | ||
97 | /* Pending child counts during tree propagation */ | |
98 | long state_pending[MEMCG_NR_STAT]; | |
99 | unsigned long events_pending[NR_VM_EVENT_ITEMS]; | |
33398cf2 MH |
100 | }; |
101 | ||
102 | struct mem_cgroup_reclaim_iter { | |
103 | struct mem_cgroup *position; | |
104 | /* scan generation, increased every round-trip */ | |
105 | unsigned int generation; | |
106 | }; | |
107 | ||
00f3ca2c JW |
108 | struct lruvec_stat { |
109 | long count[NR_VM_NODE_STAT_ITEMS]; | |
110 | }; | |
111 | ||
f3344adf MS |
112 | struct batched_lruvec_stat { |
113 | s32 count[NR_VM_NODE_STAT_ITEMS]; | |
114 | }; | |
115 | ||
0a4465d3 | 116 | /* |
3c6f17e6 YS |
117 | * Bitmap and deferred work of shrinker::id corresponding to memcg-aware |
118 | * shrinkers, which have elements charged to this memcg. | |
0a4465d3 | 119 | */ |
e4262c4f | 120 | struct shrinker_info { |
0a4465d3 | 121 | struct rcu_head rcu; |
3c6f17e6 YS |
122 | atomic_long_t *nr_deferred; |
123 | unsigned long *map; | |
0a4465d3 KT |
124 | }; |
125 | ||
33398cf2 | 126 | /* |
242c37b4 | 127 | * per-node information in memory controller. |
33398cf2 | 128 | */ |
ef8f2327 | 129 | struct mem_cgroup_per_node { |
33398cf2 | 130 | struct lruvec lruvec; |
a983b5eb | 131 | |
f3344adf MS |
132 | /* |
133 | * Legacy local VM stats. This must remain struct lruvec_stat and |
134 | * cannot be optimized to struct batched_lruvec_stat: the batched |
135 | * per-cpu counters are bounded by MEMCG_CHARGE_BATCH * PAGE_SIZE |
136 | * and therefore fit into an s32, but this field has no upper |
137 | * limit. |
138 | */ | |
815744d7 JW |
139 | struct lruvec_stat __percpu *lruvec_stat_local; |
140 | ||
141 | /* Subtree VM stats (batched updates) */ | |
f3344adf | 142 | struct batched_lruvec_stat __percpu *lruvec_stat_cpu; |
a983b5eb JW |
143 | atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; |
144 | ||
b4536f0c | 145 | unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; |
33398cf2 | 146 | |
9da83f3f | 147 | struct mem_cgroup_reclaim_iter iter; |
33398cf2 | 148 | |
e4262c4f | 149 | struct shrinker_info __rcu *shrinker_info; |
0a432dcb | 150 | |
33398cf2 MH |
151 | struct rb_node tree_node; /* RB tree node */ |
152 | unsigned long usage_in_excess; /* Set to the value by which */ |
153 | /* the soft limit is exceeded */ |
154 | bool on_tree; | |
155 | struct mem_cgroup *memcg; /* Back pointer, we cannot */ | |
156 | /* use container_of */ | |
157 | }; | |
158 | ||
33398cf2 MH |
159 | struct mem_cgroup_threshold { |
160 | struct eventfd_ctx *eventfd; | |
161 | unsigned long threshold; | |
162 | }; | |
163 | ||
164 | /* For threshold */ | |
165 | struct mem_cgroup_threshold_ary { | |
166 | /* The array index points to the threshold just below or equal to usage. */ |
167 | int current_threshold; | |
168 | /* Size of entries[] */ | |
169 | unsigned int size; | |
170 | /* Array of thresholds */ | |
307ed94c | 171 | struct mem_cgroup_threshold entries[]; |
33398cf2 MH |
172 | }; |
173 | ||
174 | struct mem_cgroup_thresholds { | |
175 | /* Primary thresholds array */ | |
176 | struct mem_cgroup_threshold_ary *primary; | |
177 | /* | |
178 | * Spare threshold array. | |
179 | * This is needed to make mem_cgroup_unregister_event() "never fail". | |
180 | * It must be able to store at least primary->size - 1 entries. | |
181 | */ | |
182 | struct mem_cgroup_threshold_ary *spare; | |
183 | }; | |
184 | ||
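The "threshold just below or equal to usage" invariant is easiest to see with a worked example. Below is a minimal, self-contained illustration of the lookup semantics; it is hypothetical userspace code, not the kernel's implementation (which keeps `current_threshold` updated incrementally in `__mem_cgroup_threshold()`):

```c
/*
 * Hypothetical illustration (userspace, not kernel code) of the
 * mem_cgroup_threshold_ary invariant: entries[] is sorted by
 * threshold, and current_threshold is the index of the largest
 * threshold that is <= the current usage.
 */
#include <stdio.h>

struct threshold { unsigned long threshold; };

static int current_threshold_index(const struct threshold *entries,
				   int size, unsigned long usage)
{
	int i = -1;	/* -1 means usage is below every threshold */

	while (i + 1 < size && entries[i + 1].threshold <= usage)
		i++;
	return i;
}

int main(void)
{
	const struct threshold entries[] = { {100}, {200}, {400} };

	/* usage 250 -> index 1: threshold 200 is just below usage */
	printf("%d\n", current_threshold_index(entries, 3, 250));
	return 0;
}
```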
567e9ab2 JW |
185 | enum memcg_kmem_state { |
186 | KMEM_NONE, | |
187 | KMEM_ALLOCATED, | |
188 | KMEM_ONLINE, | |
189 | }; | |
190 | ||
e81bf979 AL |
191 | #if defined(CONFIG_SMP) |
192 | struct memcg_padding { | |
193 | char x[0]; | |
194 | } ____cacheline_internodealigned_in_smp; | |
195 | #define MEMCG_PADDING(name) struct memcg_padding name; | |
196 | #else | |
197 | #define MEMCG_PADDING(name) | |
198 | #endif | |
199 | ||
97b27821 TH |
200 | /* |
201 | * Remember the four most recent foreign writebacks with dirty pages in this |
202 | * cgroup. Inode sharing is expected to be uncommon and, even if we miss | |
203 | * one in a given round, we're likely to catch it later if it keeps | |
204 | * foreign-dirtying, so a fairly low count should be enough. | |
205 | * | |
206 | * See mem_cgroup_track_foreign_dirty_slowpath() for details. | |
207 | */ | |
208 | #define MEMCG_CGWB_FRN_CNT 4 | |
209 | ||
210 | struct memcg_cgwb_frn { | |
211 | u64 bdi_id; /* bdi->id of the foreign inode */ | |
212 | int memcg_id; /* memcg->css.id of foreign inode */ | |
213 | u64 at; /* jiffies_64 at the time of dirtying */ | |
214 | struct wb_completion done; /* tracks in-flight foreign writebacks */ | |
215 | }; | |
216 | ||
bf4f0599 RG |
217 | /* |
218 | * Bucket for arbitrarily byte-sized objects charged to a memory | |
219 | * cgroup. The bucket can be reparented in one piece when the cgroup | |
220 | * is destroyed, without having to round up the individual references | |
221 | * of all live memory objects in the wild. | |
222 | */ | |
223 | struct obj_cgroup { | |
224 | struct percpu_ref refcnt; | |
225 | struct mem_cgroup *memcg; | |
226 | atomic_t nr_charged_bytes; | |
227 | union { | |
228 | struct list_head list; | |
229 | struct rcu_head rcu; | |
230 | }; | |
231 | }; | |
232 | ||
33398cf2 MH |
233 | /* |
234 | * The memory controller data structure. The memory controller controls both | |
235 | * page cache and RSS per cgroup. We would eventually like to provide | |
236 | * statistics based on the statistics developed by Rik Van Riel for clock-pro, | |
237 | * to help the administrator determine what knobs to tune. | |
238 | */ | |
239 | struct mem_cgroup { | |
240 | struct cgroup_subsys_state css; | |
241 | ||
73f576c0 JW |
242 | /* Private memcg ID. Used to ID objects that outlive the cgroup */ |
243 | struct mem_cgroup_id id; | |
244 | ||
33398cf2 | 245 | /* Accounted resources */ |
bd0b230f WL |
246 | struct page_counter memory; /* Both v1 & v2 */ |
247 | ||
248 | union { | |
249 | struct page_counter swap; /* v2 only */ | |
250 | struct page_counter memsw; /* v1 only */ | |
251 | }; | |
0db15298 JW |
252 | |
253 | /* Legacy consumer-oriented counters */ | |
bd0b230f WL |
254 | struct page_counter kmem; /* v1 only */ |
255 | struct page_counter tcpmem; /* v1 only */ | |
33398cf2 | 256 | |
f7e1cb6e JW |
257 | /* Range enforcement for interrupt charges */ |
258 | struct work_struct high_work; | |
259 | ||
33398cf2 MH |
260 | unsigned long soft_limit; |
261 | ||
262 | /* vmpressure notifications */ | |
263 | struct vmpressure vmpressure; | |
264 | ||
3d8b38eb RG |
265 | /* |
266 | * Should the OOM killer kill all tasks in this cgroup if it kills one? |
267 | */ | |
268 | bool oom_group; | |
269 | ||
33398cf2 MH |
270 | /* protected by memcg_oom_lock */ |
271 | bool oom_lock; | |
272 | int under_oom; | |
273 | ||
274 | int swappiness; | |
275 | /* OOM-Killer disable */ | |
276 | int oom_kill_disable; | |
277 | ||
1e577f97 | 278 | /* memory.events and memory.events.local */ |
472912a2 | 279 | struct cgroup_file events_file; |
1e577f97 | 280 | struct cgroup_file events_local_file; |
472912a2 | 281 | |
f3a53a3a TH |
282 | /* handle for "memory.swap.events" */ |
283 | struct cgroup_file swap_events_file; | |
284 | ||
33398cf2 MH |
285 | /* protect arrays of thresholds */ |
286 | struct mutex thresholds_lock; | |
287 | ||
288 | /* thresholds for memory usage. RCU-protected */ | |
289 | struct mem_cgroup_thresholds thresholds; | |
290 | ||
291 | /* thresholds for mem+swap usage. RCU-protected */ | |
292 | struct mem_cgroup_thresholds memsw_thresholds; | |
293 | ||
294 | /* For oom notifier event fd */ | |
295 | struct list_head oom_notify; | |
296 | ||
297 | /* | |
298 | * Should we move charges of a task when a task is moved into this | |
299 | * mem_cgroup? And what type of charges should we move? |
300 | */ | |
301 | unsigned long move_charge_at_immigrate; | |
e81bf979 AL |
302 | /* taken only while moving_account > 0 */ |
303 | spinlock_t move_lock; | |
304 | unsigned long move_lock_flags; | |
305 | ||
306 | MEMCG_PADDING(_pad1_); | |
307 | ||
2d146aa3 JW |
308 | /* memory.stat */ |
309 | struct memcg_vmstats vmstats; | |
42a30035 | 310 | |
815744d7 | 311 | /* memory.events */ |
42a30035 | 312 | atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; |
1e577f97 | 313 | atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS]; |
33398cf2 | 314 | |
d886f4e4 JW |
315 | unsigned long socket_pressure; |
316 | ||
317 | /* Legacy tcp memory accounting */ | |
0db15298 JW |
318 | bool tcpmem_active; |
319 | int tcpmem_pressure; | |
d886f4e4 | 320 | |
84c07d11 | 321 | #ifdef CONFIG_MEMCG_KMEM |
33398cf2 | 322 | int kmemcg_id; |
567e9ab2 | 323 | enum memcg_kmem_state kmem_state; |
bf4f0599 RG |
324 | struct obj_cgroup __rcu *objcg; |
325 | struct list_head objcg_list; /* list of inherited objcgs */ | |
33398cf2 MH |
326 | #endif |
327 | ||
4df91062 FT |
328 | MEMCG_PADDING(_pad2_); |
329 | ||
330 | /* | |
331 | * set > 0 if pages under this cgroup are moving to other cgroup. | |
332 | */ | |
333 | atomic_t moving_account; | |
334 | struct task_struct *move_lock_task; | |
335 | ||
4df91062 FT |
336 | struct memcg_vmstats_percpu __percpu *vmstats_percpu; |
337 | ||
33398cf2 MH |
338 | #ifdef CONFIG_CGROUP_WRITEBACK |
339 | struct list_head cgwb_list; | |
340 | struct wb_domain cgwb_domain; | |
97b27821 | 341 | struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT]; |
33398cf2 MH |
342 | #endif |
343 | ||
344 | /* List of events which userspace want to receive */ | |
345 | struct list_head event_list; | |
346 | spinlock_t event_list_lock; | |
347 | ||
87eaceb3 YS |
348 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
349 | struct deferred_split deferred_split_queue; | |
350 | #endif | |
351 | ||
33398cf2 MH |
352 | struct mem_cgroup_per_node *nodeinfo[0]; |
353 | /* WARNING: nodeinfo must be the last member here */ | |
354 | }; | |
7d828602 | 355 | |
a983b5eb JW |
356 | /* |
357 | * size of first charge trial. "32" comes from vmscan.c's magic value. | |
358 | * TODO: maybe necessary to use big numbers in big irons. | |
359 | */ | |
360 | #define MEMCG_CHARGE_BATCH 32U | |
361 | ||
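MEMCG_CHARGE_BATCH matters because charging keeps a per-CPU "stock" of pre-charged pages and consults it before touching the shared page counters. The following is a simplified sketch of that consume pattern, loosely modeled on consume_stock()/refill_stock() in mm/memcontrol.c; irq handling and stock draining are deliberately omitted, and the names here are illustrative:

```c
/*
 * Simplified sketch (not the kernel's exact code) of per-CPU charge
 * batching: most charges are served from a CPU-local pre-charge, and
 * only a miss pays for an atomic update of the shared page_counter.
 */
struct memcg_stock_sketch {
	struct mem_cgroup *cached;	/* memcg the stock belongs to */
	unsigned int nr_pages;		/* pre-charged pages available */
};

static DEFINE_PER_CPU(struct memcg_stock_sketch, stock_sketch);

static bool try_consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_sketch *stock;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;		/* too big for the fast path */

	stock = this_cpu_ptr(&stock_sketch);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}
	return ret;
}
```

On a miss, the slow path charges up to MEMCG_CHARGE_BATCH pages against the page counters in one go and caches the surplus in the stock, which is what amortizes the cost of the shared atomics.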
7d828602 | 362 | extern struct mem_cgroup *root_mem_cgroup; |
56161634 | 363 | |
87944e29 RG |
364 | enum page_memcg_data_flags { |
365 | /* page->memcg_data is a pointer to an objcgs vector */ | |
366 | MEMCG_DATA_OBJCGS = (1UL << 0), | |
18b2db3b RG |
367 | /* page has been accounted as a non-slab kernel page */ |
368 | MEMCG_DATA_KMEM = (1UL << 1), | |
87944e29 | 369 | /* the next bit after the last actual flag */ |
18b2db3b | 370 | __NR_MEMCG_DATA_FLAGS = (1UL << 2), |
87944e29 RG |
371 | }; |
372 | ||
373 | #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) | |
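The flags can share a word with a pointer because all three possible targets (a memcg, an obj_cgroup, or an objcgs vector) are at least word-aligned, leaving the low bits of page->memcg_data free. An illustrative view of the encoding follows; the diagram is editorial and memcg_data_to_ptr() is a hypothetical helper mirroring what the accessors below do inline:

```c
/*
 * page->memcg_data layout (illustration):
 *
 *   63                                          2 1 0
 *  +---------------------------------------------+-+-+
 *  | pointer to memcg / obj_cgroup / objcgs[]    |K|O|
 *  +---------------------------------------------+-+-+
 *   O = MEMCG_DATA_OBJCGS, K = MEMCG_DATA_KMEM
 *
 * Stripping the mask recovers the pointer; the accessors below do
 * exactly this, plus sanity checks on the flag combination.
 */
static inline void *memcg_data_to_ptr(unsigned long memcg_data)
{
	return (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
```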
374 | ||
b4e0b68f MS |
375 | static inline bool PageMemcgKmem(struct page *page); |
376 | ||
377 | /* | |
378 | * After initialization, objcg->memcg always points at a valid |
379 | * memcg, but it can be atomically swapped to the parent memcg. |
380 | * |
381 | * The caller must ensure that the returned memcg won't be released, |
382 | * e.g. by acquiring the rcu_read_lock or css_set_lock. |
383 | */ | |
384 | static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) | |
385 | { | |
386 | return READ_ONCE(objcg->memcg); | |
387 | } | |
388 | ||
389 | /* | |
390 | * __page_memcg - get the memory cgroup associated with a non-kmem page | |
391 | * @page: a pointer to the page struct | |
392 | * | |
393 | * Returns a pointer to the memory cgroup associated with the page, | |
394 | * or NULL. This function assumes that the page is known to have a | |
395 | * proper memory cgroup pointer. It's not safe to call this function | |
396 | * against some types of pages, e.g. slab pages, ex-slab pages or |
397 | * kmem pages. | |
398 | */ | |
399 | static inline struct mem_cgroup *__page_memcg(struct page *page) | |
400 | { | |
401 | unsigned long memcg_data = page->memcg_data; | |
402 | ||
403 | VM_BUG_ON_PAGE(PageSlab(page), page); | |
404 | VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); | |
405 | VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); | |
406 | ||
407 | return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); | |
408 | } | |
409 | ||
410 | /* | |
411 | * __page_objcg - get the object cgroup associated with a kmem page | |
412 | * @page: a pointer to the page struct | |
413 | * | |
414 | * Returns a pointer to the object cgroup associated with the page, | |
415 | * or NULL. This function assumes that the page is known to have a | |
416 | * proper object cgroup pointer. It's not safe to call this function | |
417 | * against some types of pages, e.g. slab pages, ex-slab pages or |
418 | * LRU pages. | |
419 | */ | |
420 | static inline struct obj_cgroup *__page_objcg(struct page *page) | |
421 | { | |
422 | unsigned long memcg_data = page->memcg_data; | |
423 | ||
424 | VM_BUG_ON_PAGE(PageSlab(page), page); | |
425 | VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); | |
426 | VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page); | |
427 | ||
428 | return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); | |
429 | } | |
430 | ||
bcfe06bf RG |
431 | /* |
432 | * page_memcg - get the memory cgroup associated with a page | |
433 | * @page: a pointer to the page struct | |
434 | * | |
435 | * Returns a pointer to the memory cgroup associated with the page, | |
436 | * or NULL. This function assumes that the page is known to have a | |
437 | * proper memory cgroup pointer. It's not safe to call this function | |
438 | * against some types of pages, e.g. slab pages or ex-slab pages. |
439 | * | |
b4e0b68f MS |
440 | * For a non-kmem page any of the following ensures page and memcg binding |
441 | * stability: | |
442 | * | |
bcfe06bf RG |
443 | * - the page lock |
444 | * - LRU isolation | |
445 | * - lock_page_memcg() | |
446 | * - exclusive reference | |
b4e0b68f MS |
447 | * |
448 | * For a kmem page a caller should hold an rcu read lock to protect memcg | |
449 | * associated with a kmem page from being released. | |
bcfe06bf RG |
450 | */ |
451 | static inline struct mem_cgroup *page_memcg(struct page *page) | |
452 | { | |
b4e0b68f MS |
453 | if (PageMemcgKmem(page)) |
454 | return obj_cgroup_memcg(__page_objcg(page)); | |
455 | else | |
456 | return __page_memcg(page); | |
bcfe06bf RG |
457 | } |
458 | ||
459 | /* | |
460 | * page_memcg_rcu - locklessly get the memory cgroup associated with a page | |
461 | * @page: a pointer to the page struct | |
462 | * | |
463 | * Returns a pointer to the memory cgroup associated with the page, | |
464 | * or NULL. This function assumes that the page is known to have a | |
465 | * proper memory cgroup pointer. It's not safe to call this function | |
466 | * against some types of pages, e.g. slab pages or ex-slab pages. |
467 | */ | |
468 | static inline struct mem_cgroup *page_memcg_rcu(struct page *page) | |
469 | { | |
b4e0b68f MS |
470 | unsigned long memcg_data = READ_ONCE(page->memcg_data); |
471 | ||
bcfe06bf RG |
472 | VM_BUG_ON_PAGE(PageSlab(page), page); |
473 | WARN_ON_ONCE(!rcu_read_lock_held()); | |
474 | ||
b4e0b68f MS |
475 | if (memcg_data & MEMCG_DATA_KMEM) { |
476 | struct obj_cgroup *objcg; | |
477 | ||
478 | objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); | |
479 | return obj_cgroup_memcg(objcg); | |
480 | } | |
481 | ||
482 | return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); | |
bcfe06bf RG |
483 | } |
484 | ||
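A short usage sketch of the two accessors under their respective stability rules. Both helpers are hypothetical; the page lock is just one of the stabilizing conditions listed above, and RCU is the rule for the lockless variant:

```c
/* Hypothetical helpers contrasting the two access patterns. */
static ino_t page_memcg_ino_locked(struct page *page)
{
	struct mem_cgroup *memcg;
	ino_t ino = 0;

	lock_page(page);	/* stabilizes the page->memcg binding */
	memcg = page_memcg(page);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	unlock_page(page);

	return ino;
}

static ino_t page_memcg_ino_rcu(struct page *page)
{
	struct mem_cgroup *memcg;
	ino_t ino = 0;

	rcu_read_lock();	/* keeps the returned memcg alive */
	memcg = page_memcg_rcu(page);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();

	return ino;
}
```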
485 | /* | |
486 | * page_memcg_check - get the memory cgroup associated with a page | |
487 | * @page: a pointer to the page struct | |
488 | * | |
489 | * Returns a pointer to the memory cgroup associated with the page, | |
b4e0b68f | 490 | * or NULL. Unlike page_memcg(), this function can take any page |
bcfe06bf | 491 | * as an argument. It has to be used when it's not known whether a page |
b4e0b68f MS |
492 | * has an associated memory cgroup pointer, an object cgroups vector, |
493 | * or an object cgroup. |
494 | * | |
495 | * For a non-kmem page any of the following ensures page and memcg binding | |
496 | * stability: | |
bcfe06bf | 497 | * |
bcfe06bf RG |
498 | * - the page lock |
499 | * - LRU isolation | |
500 | * - lock_page_memcg() | |
501 | * - exclusive reference | |
b4e0b68f MS |
502 | * |
503 | * For a kmem page a caller should hold an rcu read lock to protect memcg | |
504 | * associated with a kmem page from being released. | |
bcfe06bf RG |
505 | */ |
506 | static inline struct mem_cgroup *page_memcg_check(struct page *page) | |
507 | { | |
508 | /* | |
509 | * Because page->memcg_data might be changed asynchronously | |
510 | * for slab pages, READ_ONCE() should be used here. | |
511 | */ | |
512 | unsigned long memcg_data = READ_ONCE(page->memcg_data); | |
513 | ||
87944e29 | 514 | if (memcg_data & MEMCG_DATA_OBJCGS) |
bcfe06bf RG |
515 | return NULL; |
516 | ||
b4e0b68f MS |
517 | if (memcg_data & MEMCG_DATA_KMEM) { |
518 | struct obj_cgroup *objcg; | |
519 | ||
520 | objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); | |
521 | return obj_cgroup_memcg(objcg); | |
522 | } | |
523 | ||
18b2db3b RG |
524 | return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
525 | } | |
526 | ||
bd290e1e | 527 | #ifdef CONFIG_MEMCG_KMEM |
18b2db3b RG |
528 | /* |
529 | * PageMemcgKmem - check if the page has MemcgKmem flag set | |
530 | * @page: a pointer to the page struct | |
531 | * | |
532 | * Checks if the page has MemcgKmem flag set. The caller must ensure that | |
533 | * the page has an associated memory cgroup. It's not safe to call this function | |
534 | * against some types of pages, e.g. slab pages. | |
535 | */ | |
536 | static inline bool PageMemcgKmem(struct page *page) | |
537 | { | |
538 | VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page); | |
539 | return page->memcg_data & MEMCG_DATA_KMEM; | |
bcfe06bf RG |
540 | } |
541 | ||
270c6a71 RG |
542 | /* |
543 | * page_objcgs - get the object cgroups vector associated with a page | |
544 | * @page: a pointer to the page struct | |
545 | * | |
546 | * Returns a pointer to the object cgroups vector associated with the page, | |
547 | * or NULL. This function assumes that the page is known to have an | |
548 | * associated object cgroups vector. It's not safe to call this function | |
549 | * against pages that might have an associated memory cgroup: e.g. |
550 | * kernel stack pages. | |
551 | */ | |
552 | static inline struct obj_cgroup **page_objcgs(struct page *page) | |
553 | { | |
87944e29 RG |
554 | unsigned long memcg_data = READ_ONCE(page->memcg_data); |
555 | ||
556 | VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page); | |
18b2db3b | 557 | VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); |
87944e29 RG |
558 | |
559 | return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); | |
270c6a71 RG |
560 | } |
561 | ||
562 | /* | |
563 | * page_objcgs_check - get the object cgroups vector associated with a page | |
564 | * @page: a pointer to the page struct | |
565 | * | |
566 | * Returns a pointer to the object cgroups vector associated with the page, | |
567 | * or NULL. This function is safe to use if the page can be directly associated | |
568 | * with a memory cgroup. | |
569 | */ | |
570 | static inline struct obj_cgroup **page_objcgs_check(struct page *page) | |
571 | { | |
572 | unsigned long memcg_data = READ_ONCE(page->memcg_data); | |
573 | ||
87944e29 RG |
574 | if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS)) |
575 | return NULL; | |
270c6a71 | 576 | |
18b2db3b RG |
577 | VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); |
578 | ||
87944e29 | 579 | return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
270c6a71 RG |
580 | } |
581 | ||
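For slab pages the vector holds one slot per object. Below is a sketch of how the owning cgroup of a single object could be looked up, loosely modeled on the slab hooks in mm/slab.h; obj_to_index() maps an object address to its slot in the slab:

```c
/*
 * Sketch, loosely modeled on memcg_slab_free_hook() in mm/slab.h:
 * look up the obj_cgroup charged for one object of a slab page.
 */
static struct obj_cgroup *objcg_of_object(struct kmem_cache *s,
					  struct page *page, void *p)
{
	struct obj_cgroup **objcgs;

	objcgs = page_objcgs_check(page);
	if (!objcgs)
		return NULL;	/* page carries no objcgs vector */

	return objcgs[obj_to_index(s, page, p)];
}
```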
270c6a71 | 582 | #else |
bd290e1e MS |
583 | static inline bool PageMemcgKmem(struct page *page) |
584 | { | |
585 | return false; | |
586 | } | |
587 | ||
270c6a71 RG |
588 | static inline struct obj_cgroup **page_objcgs(struct page *page) |
589 | { | |
590 | return NULL; | |
591 | } | |
592 | ||
593 | static inline struct obj_cgroup **page_objcgs_check(struct page *page) | |
594 | { | |
595 | return NULL; | |
596 | } | |
270c6a71 RG |
597 | #endif |
598 | ||
772616b0 RG |
599 | static __always_inline bool memcg_stat_item_in_bytes(int idx) |
600 | { | |
601 | if (idx == MEMCG_PERCPU_B) | |
602 | return true; | |
603 | return vmstat_item_in_bytes(idx); | |
604 | } | |
605 | ||
dfd2f10c KT |
606 | static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) |
607 | { | |
608 | return (memcg == root_mem_cgroup); | |
609 | } | |
610 | ||
23047a96 JW |
611 | static inline bool mem_cgroup_disabled(void) |
612 | { | |
613 | return !cgroup_subsys_enabled(memory_cgrp_subsys); | |
614 | } | |
615 | ||
22f7496f YS |
616 | static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, |
617 | struct mem_cgroup *memcg, | |
1bc63fb1 | 618 | bool in_low_reclaim) |
9783aa99 | 619 | { |
1bc63fb1 CD |
620 | if (mem_cgroup_disabled()) |
621 | return 0; | |
622 | ||
22f7496f YS |
623 | /* |
624 | * There is no reclaim protection applied to a targeted reclaim. | |
625 | * We are special-casing it here because the |
626 | * mem_cgroup_protected calculation is not robust enough to keep | |
627 | * the protection invariant for calculated effective values for | |
628 | * parallel reclaimers with different reclaim target. This is | |
629 | * especially a problem for tail memcgs (as they have pages on LRU) | |
630 | * which would want to have effective values 0 for targeted reclaim | |
631 | * but a different value for external reclaim. | |
632 | * | |
633 | * Example | |
634 | * Let's have global and A's reclaim in parallel: | |
635 | * | | |
636 | * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) | |
637 | * |\ | |
638 | * | C (low = 1G, usage = 2.5G) | |
639 | * B (low = 1G, usage = 0.5G) | |
640 | * | |
641 | * For the global reclaim | |
642 | * A.elow = A.low | |
643 | * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow | |
644 | * C.elow = min(C.usage, C.low) | |
645 | * | |
646 | * With the effective values resetting we have A reclaim | |
647 | * A.elow = 0 | |
648 | * B.elow = B.low | |
649 | * C.elow = C.low | |
650 | * | |
651 | * If the global reclaim races with A's reclaim then | |
652 | * B.elow = C.elow = 0 (because children_low_usage > A.elow) |
653 | * is possible, and reclaiming B would violate the protection. |
654 | * | |
655 | */ | |
656 | if (root == memcg) | |
657 | return 0; | |
658 | ||
1bc63fb1 CD |
659 | if (in_low_reclaim) |
660 | return READ_ONCE(memcg->memory.emin); | |
9783aa99 | 661 | |
1bc63fb1 CD |
662 | return max(READ_ONCE(memcg->memory.emin), |
663 | READ_ONCE(memcg->memory.elow)); | |
9783aa99 CD |
664 | } |
665 | ||
45c7f7e1 CD |
666 | void mem_cgroup_calculate_protection(struct mem_cgroup *root, |
667 | struct mem_cgroup *memcg); | |
668 | ||
669 | static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg) | |
670 | { | |
671 | /* | |
672 | * The root memcg doesn't account charges, and doesn't support | |
673 | * protection. | |
674 | */ | |
675 | return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg); | |
676 | ||
677 | } | |
678 | ||
679 | static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) | |
680 | { | |
681 | if (!mem_cgroup_supports_protection(memcg)) | |
682 | return false; | |
683 | ||
684 | return READ_ONCE(memcg->memory.elow) >= | |
685 | page_counter_read(&memcg->memory); | |
686 | } | |
687 | ||
688 | static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) | |
689 | { | |
690 | if (!mem_cgroup_supports_protection(memcg)) | |
691 | return false; | |
692 | ||
693 | return READ_ONCE(memcg->memory.emin) >= | |
694 | page_counter_read(&memcg->memory); | |
695 | } | |
241994ed | 696 | |
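Reclaim consumes these predicates roughly as follows. This is a condensed sketch of the pattern in mm/vmscan.c's shrink_node_memcgs(), with the actual scanning elided; the real code can also override "below low" under memory pressure and record MEMCG_LOW events:

```c
/*
 * Condensed sketch of how reclaim uses the protection API, modeled
 * on shrink_node_memcgs() in mm/vmscan.c. The scan itself is elided.
 */
static void walk_protected_subtree(struct mem_cgroup *target)
{
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(target, NULL, NULL);
	do {
		mem_cgroup_calculate_protection(target, memcg);

		if (mem_cgroup_below_min(memcg))
			continue;	/* hard protection: never reclaim */
		if (mem_cgroup_below_low(memcg))
			continue;	/* soft protection: skip for now */

		/* ... shrink this memcg's lruvecs here ... */
	} while ((memcg = mem_cgroup_iter(target, memcg, NULL)));
}
```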
d9eb1ea2 | 697 | int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); |
0add0c77 SB |
698 | int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, |
699 | gfp_t gfp, swp_entry_t entry); | |
700 | void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); | |
3fea5a49 | 701 | |
0a31bc97 | 702 | void mem_cgroup_uncharge(struct page *page); |
747db954 | 703 | void mem_cgroup_uncharge_list(struct list_head *page_list); |
569b846d | 704 | |
6a93ca8f | 705 | void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); |
569b846d | 706 | |
55779ec7 | 707 | /** |
867e5e1d | 708 | * mem_cgroup_lruvec - get the lru list vector for a memcg & node |
55779ec7 | 709 | * @memcg: memcg of the wanted lruvec |
9a1ac228 | 710 | * @pgdat: pglist_data |
55779ec7 | 711 | * |
867e5e1d | 712 | * Returns the lru list vector holding pages for a given @memcg & |
9a1ac228 | 713 | * @pgdat combination. This can be the node lruvec, if the memory |
867e5e1d | 714 | * controller is disabled. |
55779ec7 | 715 | */ |
867e5e1d JW |
716 | static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, |
717 | struct pglist_data *pgdat) | |
55779ec7 | 718 | { |
ef8f2327 | 719 | struct mem_cgroup_per_node *mz; |
55779ec7 JW |
720 | struct lruvec *lruvec; |
721 | ||
722 | if (mem_cgroup_disabled()) { | |
867e5e1d | 723 | lruvec = &pgdat->__lruvec; |
55779ec7 JW |
724 | goto out; |
725 | } | |
726 | ||
1b05117d JW |
727 | if (!memcg) |
728 | memcg = root_mem_cgroup; | |
729 | ||
a3747b53 | 730 | mz = memcg->nodeinfo[pgdat->node_id]; |
55779ec7 JW |
731 | lruvec = &mz->lruvec; |
732 | out: | |
733 | /* | |
734 | * Since a node can be onlined after the mem_cgroup was created, | |
599d0c95 | 735 | * we have to be prepared to initialize lruvec->pgdat here; |
55779ec7 JW |
736 | * and if offlined then reonlined, we need to reinitialize it. |
737 | */ | |
ef8f2327 MG |
738 | if (unlikely(lruvec->pgdat != pgdat)) |
739 | lruvec->pgdat = pgdat; | |
55779ec7 JW |
740 | return lruvec; |
741 | } | |
742 | ||
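A typical consumer resolves the lruvec once and then reads or updates state through it. A minimal hypothetical helper (lruvec_page_state() is declared further down in this header):

```c
/* Hypothetical helper: pages on one LRU list of a memcg/node pair. */
static unsigned long memcg_node_lru_pages(struct mem_cgroup *memcg,
					  pg_data_t *pgdat, enum lru_list lru)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

	return lruvec_page_state(lruvec, NR_LRU_BASE + lru);
}
```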
9a1ac228 HS |
743 | /** |
744 | * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page | |
745 | * @page: the page | |
9a1ac228 HS |
746 | * |
747 | * This function relies on page->memcg_data being stable. |
748 | */ | |
a984226f | 749 | static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) |
9a1ac228 | 750 | { |
a984226f | 751 | pg_data_t *pgdat = page_pgdat(page); |
9a1ac228 HS |
752 | struct mem_cgroup *memcg = page_memcg(page); |
753 | ||
7ea510b9 | 754 | VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page); |
9a1ac228 HS |
755 | return mem_cgroup_lruvec(memcg, pgdat); |
756 | } | |
c9b0ed51 | 757 | |
64219994 | 758 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
e993d905 | 759 | |
d46eb14b SB |
760 | struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); |
761 | ||
6168d0da AS |
762 | struct lruvec *lock_page_lruvec(struct page *page); |
763 | struct lruvec *lock_page_lruvec_irq(struct page *page); | |
764 | struct lruvec *lock_page_lruvec_irqsave(struct page *page, | |
765 | unsigned long *flags); | |
766 | ||
767 | #ifdef CONFIG_DEBUG_VM | |
768 | void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page); | |
769 | #else | |
770 | static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) | |
771 | { | |
772 | } | |
773 | #endif | |
774 | ||
33398cf2 MH |
775 | static inline |
776 | struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) { |
777 | return css ? container_of(css, struct mem_cgroup, css) : NULL; | |
778 | } | |
779 | ||
bf4f0599 RG |
780 | static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) |
781 | { | |
782 | return percpu_ref_tryget(&objcg->refcnt); | |
783 | } | |
784 | ||
785 | static inline void obj_cgroup_get(struct obj_cgroup *objcg) | |
786 | { | |
787 | percpu_ref_get(&objcg->refcnt); | |
788 | } | |
789 | ||
b4e0b68f MS |
790 | static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, |
791 | unsigned long nr) | |
bf4f0599 | 792 | { |
b4e0b68f | 793 | percpu_ref_get_many(&objcg->refcnt, nr); |
bf4f0599 RG |
794 | } |
795 | ||
b4e0b68f | 796 | static inline void obj_cgroup_put(struct obj_cgroup *objcg) |
bf4f0599 | 797 | { |
b4e0b68f | 798 | percpu_ref_put(&objcg->refcnt); |
bf4f0599 RG |
799 | } |
800 | ||
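Because objcg->memcg can be reparented at any time, code that needs to hold on to an objcg pins it with tryget under RCU. A sketch of that pattern, loosely modeled on get_obj_cgroup_from_current() in mm/memcontrol.c; note that the memcg->objcg field only exists under CONFIG_MEMCG_KMEM:

```c
/*
 * Sketch of the RCU + tryget pattern for pinning an obj_cgroup,
 * loosely modeled on get_obj_cgroup_from_current(). Requires
 * CONFIG_MEMCG_KMEM for the memcg->objcg field.
 */
static struct obj_cgroup *pin_objcg(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;

	rcu_read_lock();
	objcg = rcu_dereference(memcg->objcg);
	if (objcg && !obj_cgroup_tryget(objcg))
		objcg = NULL;		/* raced with objcg release */
	rcu_read_unlock();

	return objcg;	/* caller releases with obj_cgroup_put() */
}
```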
dc0b5864 RG |
801 | static inline void mem_cgroup_put(struct mem_cgroup *memcg) |
802 | { | |
d46eb14b SB |
803 | if (memcg) |
804 | css_put(&memcg->css); | |
dc0b5864 RG |
805 | } |
806 | ||
8e8ae645 JW |
807 | #define mem_cgroup_from_counter(counter, member) \ |
808 | container_of(counter, struct mem_cgroup, member) | |
809 | ||
33398cf2 MH |
810 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, |
811 | struct mem_cgroup *, | |
812 | struct mem_cgroup_reclaim_cookie *); | |
813 | void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); | |
7c5f64f8 VD |
814 | int mem_cgroup_scan_tasks(struct mem_cgroup *, |
815 | int (*)(struct task_struct *, void *), void *); | |
33398cf2 | 816 | |
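mem_cgroup_iter() implements a pre-order hierarchy walk with css reference counting; the canonical calling pattern, including early exit through mem_cgroup_iter_break(), looks like this (stop_condition() is a placeholder predicate, not a kernel function):

```c
/*
 * Canonical hierarchy-walk pattern for mem_cgroup_iter(). A NULL
 * root walks from root_mem_cgroup; when leaving the loop early,
 * mem_cgroup_iter_break() must drop the pinned css reference.
 */
static bool stop_condition(struct mem_cgroup *memcg);	/* placeholder */

static void walk_subtree(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg;

	for (memcg = mem_cgroup_iter(root, NULL, NULL);
	     memcg;
	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
		if (stop_condition(memcg)) {
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		/* ... per-memcg work ... */
	}
}
```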
23047a96 JW |
817 | static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) |
818 | { | |
819 | if (mem_cgroup_disabled()) | |
820 | return 0; | |
821 | ||
73f576c0 | 822 | return memcg->id.id; |
23047a96 | 823 | } |
73f576c0 | 824 | struct mem_cgroup *mem_cgroup_from_id(unsigned short id); |
23047a96 | 825 | |
aa9694bb CD |
826 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) |
827 | { | |
828 | return mem_cgroup_from_css(seq_css(m)); | |
829 | } | |
830 | ||
2262185c RG |
831 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
832 | { | |
833 | struct mem_cgroup_per_node *mz; | |
834 | ||
835 | if (mem_cgroup_disabled()) | |
836 | return NULL; | |
837 | ||
838 | mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); | |
839 | return mz->memcg; | |
840 | } | |
841 | ||
8e8ae645 JW |
842 | /** |
843 | * parent_mem_cgroup - find the accounting parent of a memcg | |
844 | * @memcg: memcg whose parent to find | |
845 | * | |
846 | * Returns the parent memcg, or NULL if this is the root or the memory | |
847 | * controller is in legacy no-hierarchy mode. | |
848 | */ | |
849 | static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) | |
850 | { | |
851 | if (!memcg->memory.parent) | |
852 | return NULL; | |
853 | return mem_cgroup_from_counter(memcg->memory.parent, memory); | |
854 | } | |
855 | ||
33398cf2 MH |
856 | static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, |
857 | struct mem_cgroup *root) | |
858 | { | |
859 | if (root == memcg) | |
860 | return true; | |
33398cf2 MH |
861 | return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); |
862 | } | |
e1aab161 | 863 | |
2314b42d JW |
864 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
865 | struct mem_cgroup *memcg) | |
2e4d4091 | 866 | { |
587af308 | 867 | struct mem_cgroup *task_memcg; |
413918bb | 868 | bool match = false; |
c3ac9a8a | 869 | |
2e4d4091 | 870 | rcu_read_lock(); |
587af308 | 871 | task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
413918bb | 872 | if (task_memcg) |
2314b42d | 873 | match = mem_cgroup_is_descendant(task_memcg, memcg); |
2e4d4091 | 874 | rcu_read_unlock(); |
c3ac9a8a | 875 | return match; |
2e4d4091 | 876 | } |
8a9f3ccd | 877 | |
64219994 | 878 | struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); |
2fc04524 | 879 | ino_t page_cgroup_ino(struct page *page); |
d324236b | 880 | |
eb01aaab VD |
881 | static inline bool mem_cgroup_online(struct mem_cgroup *memcg) |
882 | { | |
883 | if (mem_cgroup_disabled()) | |
884 | return true; | |
885 | return !!(memcg->css.flags & CSS_ONLINE); | |
886 | } | |
887 | ||
58ae83db KH |
888 | /* |
889 | * For memory reclaim. | |
890 | */ | |
889976db | 891 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); |
33398cf2 MH |
892 | |
893 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, | |
b4536f0c | 894 | int zid, int nr_pages); |
33398cf2 | 895 | |
b4536f0c MH |
896 | static inline |
897 | unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, | |
898 | enum lru_list lru, int zone_idx) | |
899 | { | |
900 | struct mem_cgroup_per_node *mz; | |
901 | ||
902 | mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); | |
e0e3f42f | 903 | return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); |
33398cf2 MH |
904 | } |
905 | ||
b23afb93 TH |
906 | void mem_cgroup_handle_over_high(void); |
907 | ||
bbec2e15 | 908 | unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); |
7c5f64f8 | 909 | |
9783aa99 CD |
910 | unsigned long mem_cgroup_size(struct mem_cgroup *memcg); |
911 | ||
f0c867d9 | 912 | void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, |
64219994 | 913 | struct task_struct *p); |
58ae83db | 914 | |
f0c867d9 | 915 | void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); |
916 | ||
29ef680a | 917 | static inline void mem_cgroup_enter_user_fault(void) |
519e5247 | 918 | { |
29ef680a MH |
919 | WARN_ON(current->in_user_fault); |
920 | current->in_user_fault = 1; | |
519e5247 JW |
921 | } |
922 | ||
29ef680a | 923 | static inline void mem_cgroup_exit_user_fault(void) |
519e5247 | 924 | { |
29ef680a MH |
925 | WARN_ON(!current->in_user_fault); |
926 | current->in_user_fault = 0; | |
519e5247 JW |
927 | } |
928 | ||
3812c8c8 JW |
929 | static inline bool task_in_memcg_oom(struct task_struct *p) |
930 | { | |
626ebc41 | 931 | return p->memcg_in_oom; |
3812c8c8 JW |
932 | } |
933 | ||
49426420 | 934 | bool mem_cgroup_oom_synchronize(bool wait); |
3d8b38eb RG |
935 | struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, |
936 | struct mem_cgroup *oom_domain); | |
937 | void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); | |
3812c8c8 | 938 | |
c255a458 | 939 | #ifdef CONFIG_MEMCG_SWAP |
eccb52e7 | 940 | extern bool cgroup_memory_noswap; |
c077719b | 941 | #endif |
f8d66542 | 942 | |
1c824a68 | 943 | void lock_page_memcg(struct page *page); |
62cccb8c | 944 | void unlock_page_memcg(struct page *page); |
d7365e78 | 945 | |
db9adbcb | 946 | void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); |
2a2e4885 | 947 | |
04fecbf5 | 948 | /* idx can be of type enum memcg_stat_item or node_stat_item */ |
00f3ca2c | 949 | static inline void mod_memcg_state(struct mem_cgroup *memcg, |
04fecbf5 | 950 | int idx, int val) |
2a2e4885 | 951 | { |
c3cc3911 JW |
952 | unsigned long flags; |
953 | ||
954 | local_irq_save(flags); | |
a983b5eb | 955 | __mod_memcg_state(memcg, idx, val); |
c3cc3911 | 956 | local_irq_restore(flags); |
2a2e4885 JW |
957 | } |
958 | ||
42a30035 JW |
959 | static inline unsigned long lruvec_page_state(struct lruvec *lruvec, |
960 | enum node_stat_item idx) | |
961 | { | |
962 | struct mem_cgroup_per_node *pn; | |
963 | long x; | |
964 | ||
965 | if (mem_cgroup_disabled()) | |
966 | return node_page_state(lruvec_pgdat(lruvec), idx); | |
967 | ||
968 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); | |
969 | x = atomic_long_read(&pn->lruvec_stat[idx]); | |
970 | #ifdef CONFIG_SMP | |
971 | if (x < 0) | |
972 | x = 0; | |
973 | #endif | |
974 | return x; | |
975 | } | |
976 | ||
205b20cc JW |
977 | static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, |
978 | enum node_stat_item idx) | |
2a7106f2 | 979 | { |
00f3ca2c | 980 | struct mem_cgroup_per_node *pn; |
815744d7 JW |
981 | long x = 0; |
982 | int cpu; | |
00f3ca2c JW |
983 | |
984 | if (mem_cgroup_disabled()) | |
985 | return node_page_state(lruvec_pgdat(lruvec), idx); | |
986 | ||
987 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); | |
815744d7 JW |
988 | for_each_possible_cpu(cpu) |
989 | x += per_cpu(pn->lruvec_stat_local->count[idx], cpu); | |
a983b5eb JW |
990 | #ifdef CONFIG_SMP |
991 | if (x < 0) | |
992 | x = 0; | |
993 | #endif | |
994 | return x; | |
2a7106f2 GT |
995 | } |
996 | ||
eedc4e5a RG |
997 | void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, |
998 | int val); | |
da3ceeff | 999 | void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); |
991e7673 | 1000 | |
da3ceeff | 1001 | static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
991e7673 SB |
1002 | int val) |
1003 | { | |
1004 | unsigned long flags; | |
1005 | ||
1006 | local_irq_save(flags); | |
da3ceeff | 1007 | __mod_lruvec_kmem_state(p, idx, val); |
991e7673 SB |
1008 | local_irq_restore(flags); |
1009 | } | |
1010 | ||
eedc4e5a RG |
1011 | static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, |
1012 | enum node_stat_item idx, int val) | |
1013 | { | |
1014 | unsigned long flags; | |
1015 | ||
1016 | local_irq_save(flags); | |
1017 | __mod_memcg_lruvec_state(lruvec, idx, val); | |
1018 | local_irq_restore(flags); | |
1019 | } | |
1020 | ||
db9adbcb JW |
1021 | void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, |
1022 | unsigned long count); | |
c9019e9b | 1023 | |
2262185c | 1024 | static inline void count_memcg_events(struct mem_cgroup *memcg, |
e27be240 JW |
1025 | enum vm_event_item idx, |
1026 | unsigned long count) | |
2262185c | 1027 | { |
c3cc3911 JW |
1028 | unsigned long flags; |
1029 | ||
1030 | local_irq_save(flags); | |
a983b5eb | 1031 | __count_memcg_events(memcg, idx, count); |
c3cc3911 | 1032 | local_irq_restore(flags); |
2262185c RG |
1033 | } |
1034 | ||
1035 | static inline void count_memcg_page_event(struct page *page, | |
e27be240 | 1036 | enum vm_event_item idx) |
2262185c | 1037 | { |
bcfe06bf RG |
1038 | struct mem_cgroup *memcg = page_memcg(page); |
1039 | ||
1040 | if (memcg) | |
1041 | count_memcg_events(memcg, idx, 1); | |
2262185c RG |
1042 | } |
1043 | ||
1044 | static inline void count_memcg_event_mm(struct mm_struct *mm, | |
1045 | enum vm_event_item idx) | |
68ae564b | 1046 | { |
33398cf2 MH |
1047 | struct mem_cgroup *memcg; |
1048 | ||
68ae564b DR |
1049 | if (mem_cgroup_disabled()) |
1050 | return; | |
33398cf2 MH |
1051 | |
1052 | rcu_read_lock(); | |
1053 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); | |
fe6bdfc8 | 1054 | if (likely(memcg)) |
c9019e9b | 1055 | count_memcg_events(memcg, idx, 1); |
33398cf2 | 1056 | rcu_read_unlock(); |
68ae564b | 1057 | } |
c9019e9b | 1058 | |
e27be240 JW |
1059 | static inline void memcg_memory_event(struct mem_cgroup *memcg, |
1060 | enum memcg_memory_event event) | |
c9019e9b | 1061 | { |
8b21ca02 MS |
1062 | bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX || |
1063 | event == MEMCG_SWAP_FAIL; | |
1064 | ||
1e577f97 | 1065 | atomic_long_inc(&memcg->memory_events_local[event]); |
8b21ca02 MS |
1066 | if (!swap_event) |
1067 | cgroup_file_notify(&memcg->events_local_file); | |
1e577f97 | 1068 | |
9852ae3f CD |
1069 | do { |
1070 | atomic_long_inc(&memcg->memory_events[event]); | |
8b21ca02 MS |
1071 | if (swap_event) |
1072 | cgroup_file_notify(&memcg->swap_events_file); | |
1073 | else | |
1074 | cgroup_file_notify(&memcg->events_file); | |
9852ae3f | 1075 | |
04fd61a4 YS |
1076 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
1077 | break; | |
9852ae3f CD |
1078 | if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) |
1079 | break; | |
1080 | } while ((memcg = parent_mem_cgroup(memcg)) && | |
1081 | !mem_cgroup_is_root(memcg)); | |
c9019e9b JW |
1082 | } |
1083 | ||
fe6bdfc8 RG |
1084 | static inline void memcg_memory_event_mm(struct mm_struct *mm, |
1085 | enum memcg_memory_event event) | |
1086 | { | |
1087 | struct mem_cgroup *memcg; | |
1088 | ||
1089 | if (mem_cgroup_disabled()) | |
1090 | return; | |
1091 | ||
1092 | rcu_read_lock(); | |
1093 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); | |
1094 | if (likely(memcg)) | |
1095 | memcg_memory_event(memcg, event); | |
1096 | rcu_read_unlock(); | |
1097 | } | |
1098 | ||
be6c8982 | 1099 | void split_page_memcg(struct page *head, unsigned int nr); |
ca3e0214 | 1100 | |
2d146aa3 JW |
1101 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, |
1102 | gfp_t gfp_mask, | |
1103 | unsigned long *total_scanned); | |
1104 | ||
c255a458 | 1105 | #else /* CONFIG_MEMCG */ |
23047a96 JW |
1106 | |
1107 | #define MEM_CGROUP_ID_SHIFT 0 | |
1108 | #define MEM_CGROUP_ID_MAX 0 | |
1109 | ||
bcfe06bf RG |
1110 | static inline struct mem_cgroup *page_memcg(struct page *page) |
1111 | { | |
1112 | return NULL; | |
1113 | } | |
1114 | ||
1115 | static inline struct mem_cgroup *page_memcg_rcu(struct page *page) | |
1116 | { | |
1117 | WARN_ON_ONCE(!rcu_read_lock_held()); | |
1118 | return NULL; | |
1119 | } | |
1120 | ||
1121 | static inline struct mem_cgroup *page_memcg_check(struct page *page) | |
1122 | { | |
1123 | return NULL; | |
1124 | } | |
1125 | ||
18b2db3b RG |
1126 | static inline bool PageMemcgKmem(struct page *page) |
1127 | { | |
1128 | return false; | |
1129 | } | |
1130 | ||
dfd2f10c KT |
1131 | static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) |
1132 | { | |
1133 | return true; | |
1134 | } | |
1135 | ||
23047a96 JW |
1136 | static inline bool mem_cgroup_disabled(void) |
1137 | { | |
1138 | return true; | |
1139 | } | |
1140 | ||
e27be240 JW |
1141 | static inline void memcg_memory_event(struct mem_cgroup *memcg, |
1142 | enum memcg_memory_event event) | |
241994ed JW |
1143 | { |
1144 | } | |
1145 | ||
fe6bdfc8 RG |
1146 | static inline void memcg_memory_event_mm(struct mm_struct *mm, |
1147 | enum memcg_memory_event event) | |
1148 | { | |
1149 | } | |
1150 | ||
22f7496f YS |
1151 | static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, |
1152 | struct mem_cgroup *memcg, | |
1bc63fb1 | 1153 | bool in_low_reclaim) |
9783aa99 | 1154 | { |
1bc63fb1 | 1155 | return 0; |
9783aa99 CD |
1156 | } |
1157 | ||
45c7f7e1 CD |
1158 | static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, |
1159 | struct mem_cgroup *memcg) | |
1160 | { | |
1161 | } | |
1162 | ||
1163 | static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) | |
1164 | { | |
1165 | return false; | |
1166 | } | |
1167 | ||
1168 | static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) | |
241994ed | 1169 | { |
45c7f7e1 | 1170 | return false; |
241994ed JW |
1171 | } |
1172 | ||
3fea5a49 | 1173 | static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, |
d9eb1ea2 | 1174 | gfp_t gfp_mask) |
3fea5a49 JW |
1175 | { |
1176 | return 0; | |
1177 | } | |
1178 | ||
0add0c77 SB |
1179 | static inline int mem_cgroup_swapin_charge_page(struct page *page, |
1180 | struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) | |
1181 | { | |
1182 | return 0; | |
1183 | } | |
1184 | ||
1185 | static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) | |
1186 | { | |
1187 | } | |
1188 | ||
0a31bc97 | 1189 | static inline void mem_cgroup_uncharge(struct page *page) |
569b846d KH |
1190 | { |
1191 | } | |
1192 | ||
747db954 | 1193 | static inline void mem_cgroup_uncharge_list(struct list_head *page_list) |
8a9f3ccd BS |
1194 | { |
1195 | } | |
1196 | ||
6a93ca8f | 1197 | static inline void mem_cgroup_migrate(struct page *old, struct page *new) |
69029cd5 KH |
1198 | { |
1199 | } | |
1200 | ||
867e5e1d JW |
1201 | static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, |
1202 | struct pglist_data *pgdat) | |
08e552c6 | 1203 | { |
867e5e1d | 1204 | return &pgdat->__lruvec; |
08e552c6 KH |
1205 | } |
1206 | ||
a984226f | 1207 | static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) |
66e1707b | 1208 | { |
a984226f MS |
1209 | pg_data_t *pgdat = page_pgdat(page); |
1210 | ||
867e5e1d | 1211 | return &pgdat->__lruvec; |
66e1707b BS |
1212 | } |
1213 | ||
2d146aa3 JW |
1214 | static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) |
1215 | { | |
1216 | } | |
1217 | ||
b910718a JW |
1218 | static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) |
1219 | { | |
1220 | return NULL; | |
1221 | } | |
1222 | ||
587af308 | 1223 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
c0ff4b85 | 1224 | struct mem_cgroup *memcg) |
bed7161a | 1225 | { |
587af308 | 1226 | return true; |
bed7161a BS |
1227 | } |
1228 | ||
d46eb14b SB |
1229 | static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) |
1230 | { | |
1231 | return NULL; | |
1232 | } | |
1233 | ||
dc0b5864 RG |
1234 | static inline void mem_cgroup_put(struct mem_cgroup *memcg) |
1235 | { | |
1236 | } | |
1237 | ||
6168d0da AS |
1238 | static inline struct lruvec *lock_page_lruvec(struct page *page) |
1239 | { | |
1240 | struct pglist_data *pgdat = page_pgdat(page); | |
1241 | ||
1242 | spin_lock(&pgdat->__lruvec.lru_lock); | |
1243 | return &pgdat->__lruvec; | |
1244 | } | |
1245 | ||
1246 | static inline struct lruvec *lock_page_lruvec_irq(struct page *page) | |
1247 | { | |
1248 | struct pglist_data *pgdat = page_pgdat(page); | |
1249 | ||
1250 | spin_lock_irq(&pgdat->__lruvec.lru_lock); | |
1251 | return &pgdat->__lruvec; | |
1252 | } | |
1253 | ||
1254 | static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page, | |
1255 | unsigned long *flagsp) | |
1256 | { | |
1257 | struct pglist_data *pgdat = page_pgdat(page); | |
1258 | ||
1259 | spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp); | |
1260 | return &pgdat->__lruvec; | |
1261 | } | |
1262 | ||
5660048c JW |
1263 | static inline struct mem_cgroup * |
1264 | mem_cgroup_iter(struct mem_cgroup *root, | |
1265 | struct mem_cgroup *prev, | |
1266 | struct mem_cgroup_reclaim_cookie *reclaim) | |
1267 | { | |
1268 | return NULL; | |
1269 | } | |
1270 | ||
1271 | static inline void mem_cgroup_iter_break(struct mem_cgroup *root, | |
1272 | struct mem_cgroup *prev) | |
1273 | { | |
1274 | } | |
1275 | ||
7c5f64f8 VD |
1276 | static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, |
1277 | int (*fn)(struct task_struct *, void *), void *arg) | |
1278 | { | |
1279 | return 0; | |
1280 | } | |
1281 | ||
23047a96 | 1282 | static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) |
f8d66542 | 1283 | { |
23047a96 JW |
1284 | return 0; |
1285 | } | |
1286 | ||
1287 | static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) | |
1288 | { | |
1289 | WARN_ON_ONCE(id); | |
1290 | /* XXX: This should always return root_mem_cgroup */ | |
1291 | return NULL; | |
f8d66542 | 1292 | } |
a636b327 | 1293 | |
aa9694bb CD |
1294 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) |
1295 | { | |
1296 | return NULL; | |
1297 | } | |
1298 | ||
2262185c RG |
1299 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
1300 | { | |
1301 | return NULL; | |
1302 | } | |
1303 | ||
eb01aaab | 1304 | static inline bool mem_cgroup_online(struct mem_cgroup *memcg) |
14797e23 | 1305 | { |
13308ca9 | 1306 | return true; |
14797e23 KM |
1307 | } |
1308 | ||
b4536f0c MH |
1309 | static inline |
1310 | unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, | |
1311 | enum lru_list lru, int zone_idx) | |
1312 | { | |
1313 | return 0; | |
1314 | } | |
a3d8e054 | 1315 | |
bbec2e15 | 1316 | static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) |
7c5f64f8 VD |
1317 | { |
1318 | return 0; | |
1319 | } | |
1320 | ||
9783aa99 CD |
1321 | static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg) |
1322 | { | |
1323 | return 0; | |
1324 | } | |
1325 | ||
e222432b | 1326 | static inline void |
f0c867d9 | 1327 | mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) |
1328 | { | |
1329 | } | |
1330 | ||
1331 | static inline void | |
1332 | mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) | |
e222432b BS |
1333 | { |
1334 | } | |
1335 | ||
1c824a68 | 1336 | static inline void lock_page_memcg(struct page *page) |
89c06bd5 KH |
1337 | { |
1338 | } | |
1339 | ||
62cccb8c | 1340 | static inline void unlock_page_memcg(struct page *page) |
89c06bd5 KH |
1341 | { |
1342 | } | |
1343 | ||
b23afb93 TH |
1344 | static inline void mem_cgroup_handle_over_high(void) |
1345 | { | |
1346 | } | |
1347 | ||
29ef680a | 1348 | static inline void mem_cgroup_enter_user_fault(void) |
519e5247 JW |
1349 | { |
1350 | } | |
1351 | ||
29ef680a | 1352 | static inline void mem_cgroup_exit_user_fault(void) |
519e5247 JW |
1353 | { |
1354 | } | |
1355 | ||
3812c8c8 JW |
1356 | static inline bool task_in_memcg_oom(struct task_struct *p) |
1357 | { | |
1358 | return false; | |
1359 | } | |
1360 | ||
49426420 | 1361 | static inline bool mem_cgroup_oom_synchronize(bool wait) |
3812c8c8 JW |
1362 | { |
1363 | return false; | |
1364 | } | |
1365 | ||
3d8b38eb RG |
1366 | static inline struct mem_cgroup *mem_cgroup_get_oom_group( |
1367 | struct task_struct *victim, struct mem_cgroup *oom_domain) | |
1368 | { | |
1369 | return NULL; | |
1370 | } | |
1371 | ||
1372 | static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) | |
1373 | { | |
1374 | } | |
1375 | ||
00f3ca2c | 1376 | static inline void __mod_memcg_state(struct mem_cgroup *memcg, |
04fecbf5 | 1377 | int idx, |
00f3ca2c | 1378 | int nr) |
2a2e4885 JW |
1379 | { |
1380 | } | |
1381 | ||
00f3ca2c | 1382 | static inline void mod_memcg_state(struct mem_cgroup *memcg, |
04fecbf5 | 1383 | int idx, |
00f3ca2c | 1384 | int nr) |
2a2e4885 JW |
1385 | { |
1386 | } | |
1387 | ||
42a30035 JW |
1388 | static inline unsigned long lruvec_page_state(struct lruvec *lruvec, |
1389 | enum node_stat_item idx) | |
1390 | { | |
1391 | return node_page_state(lruvec_pgdat(lruvec), idx); | |
1392 | } | |
1393 | ||
205b20cc JW |
1394 | static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, |
1395 | enum node_stat_item idx) | |
2a7106f2 | 1396 | { |
00f3ca2c | 1397 | return node_page_state(lruvec_pgdat(lruvec), idx); |
2a7106f2 GT |
1398 | } |
1399 | ||
eedc4e5a RG |
1400 | static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, |
1401 | enum node_stat_item idx, int val) | |
1402 | { | |
1403 | } | |
1404 | ||
da3ceeff | 1405 | static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
ec9f0238 RG |
1406 | int val) |
1407 | { | |
1408 | struct page *page = virt_to_head_page(p); | |
1409 | ||
1410 | __mod_node_page_state(page_pgdat(page), idx, val); | |
1411 | } | |
1412 | ||
da3ceeff | 1413 | static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
991e7673 SB |
1414 | int val) |
1415 | { | |
1416 | struct page *page = virt_to_head_page(p); | |
1417 | ||
1418 | mod_node_page_state(page_pgdat(page), idx, val); | |
1419 | } | |
1420 | ||
2262185c RG |
1421 | static inline void count_memcg_events(struct mem_cgroup *memcg, |
1422 | enum vm_event_item idx, | |
1423 | unsigned long count) | |
1424 | { | |
1425 | } | |
1426 | ||
9851ac13 KT |
1427 | static inline void __count_memcg_events(struct mem_cgroup *memcg, |
1428 | enum vm_event_item idx, | |
1429 | unsigned long count) | |
1430 | { | |
1431 | } | |
1432 | ||
2262185c | 1433 | static inline void count_memcg_page_event(struct page *page, |
04fecbf5 | 1434 | int idx) |
2262185c RG |
1435 | { |
1436 | } | |
1437 | ||
456f998e | 1438 | static inline |
2262185c | 1439 | void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) |
456f998e YH |
1440 | { |
1441 | } | |
6168d0da | 1442 | |
2d146aa3 JW |
1443 | static inline void split_page_memcg(struct page *head, unsigned int nr) |
1444 | { | |
1445 | } | |
1446 | ||
1447 | static inline | |
1448 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, | |
1449 | gfp_t gfp_mask, | |
1450 | unsigned long *total_scanned) | |
6168d0da | 1451 | { |
2d146aa3 | 1452 | return 0; |
6168d0da | 1453 | } |
c255a458 | 1454 | #endif /* CONFIG_MEMCG */ |
78fb7466 | 1455 | |
da3ceeff | 1456 | static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) |
ec9f0238 | 1457 | { |
da3ceeff | 1458 | __mod_lruvec_kmem_state(p, idx, 1); |
ec9f0238 RG |
1459 | } |
1460 | ||
da3ceeff | 1461 | static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) |
ec9f0238 | 1462 | { |
da3ceeff | 1463 | __mod_lruvec_kmem_state(p, idx, -1); |
ec9f0238 RG |
1464 | } |
1465 | ||
7cf111bc JW |
1466 | static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) |
1467 | { | |
1468 | struct mem_cgroup *memcg; | |
1469 | ||
1470 | memcg = lruvec_memcg(lruvec); | |
1471 | if (!memcg) | |
1472 | return NULL; | |
1473 | memcg = parent_mem_cgroup(memcg); | |
1474 | if (!memcg) | |
1475 | return NULL; | |
1476 | return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); | |
1477 | } | |
1478 | ||
6168d0da AS |
1479 | static inline void unlock_page_lruvec(struct lruvec *lruvec) |
1480 | { | |
1481 | spin_unlock(&lruvec->lru_lock); | |
1482 | } | |
1483 | ||
1484 | static inline void unlock_page_lruvec_irq(struct lruvec *lruvec) | |
1485 | { | |
1486 | spin_unlock_irq(&lruvec->lru_lock); | |
1487 | } | |
1488 | ||
1489 | static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, | |
1490 | unsigned long flags) | |
1491 | { | |
1492 | spin_unlock_irqrestore(&lruvec->lru_lock, flags); | |
1493 | } | |
1494 | ||
7467c391 MS |
1495 | /* Test requires a stable page->memcg binding, see page_memcg() */ |
1496 | static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec) | |
f2e4d28d MS |
1497 | { |
1498 | return lruvec_pgdat(lruvec) == page_pgdat(page) && | |
1499 | lruvec_memcg(lruvec) == page_memcg(page); | |
1500 | } | |
1501 | ||
2a5e4e34 AD |
1502 | /* Don't lock again iff the page's lruvec is already locked */
1503 | static inline struct lruvec *relock_page_lruvec_irq(struct page *page, | |
1504 | struct lruvec *locked_lruvec) | |
1505 | { | |
1506 | if (locked_lruvec) { | |
7467c391 | 1507 | if (page_matches_lruvec(page, locked_lruvec)) |
2a5e4e34 AD |
1508 | return locked_lruvec; |
1509 | ||
1510 | unlock_page_lruvec_irq(locked_lruvec); | |
1511 | } | |
1512 | ||
1513 | return lock_page_lruvec_irq(page); | |
1514 | } | |
1515 | ||
1516 | /* Don't take the lock again if the page's lruvec is already locked */ | |
1517 | static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page, | |
1518 | struct lruvec *locked_lruvec, unsigned long *flags) | |
1519 | { | |
1520 | if (locked_lruvec) { | |
7467c391 | 1521 | if (page_matches_lruvec(page, locked_lruvec)) |
2a5e4e34 AD |
1522 | return locked_lruvec; |
1523 | ||
1524 | unlock_page_lruvec_irqrestore(locked_lruvec, *flags); | |
1525 | } | |
1526 | ||
1527 | return lock_page_lruvec_irqsave(page, flags); | |
1528 | } | |
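/*
 * Sketch of the intended batching pattern (the helpers above are real;
 * the page list and loop body are illustrative only):
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(page, &pages, lru) {
 *		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 *		// operate on @page under lruvec->lru_lock
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 *
 * The lock is dropped and re-taken only when consecutive pages belong to
 * different lruvecs, keeping lock traffic low on long page lists.
 */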
1529 | ||
52ebea74 | 1530 | #ifdef CONFIG_CGROUP_WRITEBACK |
841710aa | 1531 | |
841710aa | 1532 | struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); |
c5edf9cd TH |
1533 | void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, |
1534 | unsigned long *pheadroom, unsigned long *pdirty, | |
1535 | unsigned long *pwriteback); | |
841710aa | 1536 | |
97b27821 TH |
1537 | void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, |
1538 | struct bdi_writeback *wb); | |
1539 | ||
1540 | static inline void mem_cgroup_track_foreign_dirty(struct page *page, | |
1541 | struct bdi_writeback *wb) | |
1542 | { | |
08d1d0e6 BH |
1543 | if (mem_cgroup_disabled()) |
1544 | return; | |
1545 | ||
bcfe06bf | 1546 | if (unlikely(&page_memcg(page)->css != wb->memcg_css)) |
97b27821 TH |
1547 | mem_cgroup_track_foreign_dirty_slowpath(page, wb); |
1548 | } | |
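/*
 * Illustrative only: dirtying code is expected to call the helper above
 * once per newly dirtied page, roughly:
 *
 *	mem_cgroup_track_foreign_dirty(page, inode_to_wb(inode));	// assumed caller context
 *
 * The slowpath records the foreign writeback domain so that
 * mem_cgroup_flush_foreign() can later kick writeback on it.
 */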
1549 | ||
1550 | void mem_cgroup_flush_foreign(struct bdi_writeback *wb); | |
1551 | ||
841710aa TH |
1552 | #else /* CONFIG_CGROUP_WRITEBACK */ |
1553 | ||
1554 | static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) | |
1555 | { | |
1556 | return NULL; | |
1557 | } | |
1558 | ||
c2aa723a | 1559 | static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, |
c5edf9cd TH |
1560 | unsigned long *pfilepages, |
1561 | unsigned long *pheadroom, | |
c2aa723a TH |
1562 | unsigned long *pdirty, |
1563 | unsigned long *pwriteback) | |
1564 | { | |
1565 | } | |
1566 | ||
97b27821 TH |
1567 | static inline void mem_cgroup_track_foreign_dirty(struct page *page, |
1568 | struct bdi_writeback *wb) | |
1569 | { | |
1570 | } | |
1571 | ||
1572 | static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) | |
1573 | { | |
1574 | } | |
1575 | ||
841710aa | 1576 | #endif /* CONFIG_CGROUP_WRITEBACK */ |
52ebea74 | 1577 | |
e1aab161 | 1578 | struct sock; |
baac50bb JW |
1579 | bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); |
1580 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); | |
d886f4e4 | 1581 | #ifdef CONFIG_MEMCG |
ef12947c JW |
1582 | extern struct static_key_false memcg_sockets_enabled_key; |
1583 | #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) | |
2d758073 JW |
1584 | void mem_cgroup_sk_alloc(struct sock *sk); |
1585 | void mem_cgroup_sk_free(struct sock *sk); | |
baac50bb | 1586 | static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) |
e805605c | 1587 | { |
0db15298 | 1588 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure) |
8e8ae645 | 1589 | return true; |
8e8ae645 JW |
1590 | do { |
1591 | if (time_before(jiffies, memcg->socket_pressure)) | |
1592 | return true; | |
1593 | } while ((memcg = parent_mem_cgroup(memcg))); | |
1594 | return false; | |
e805605c | 1595 | } |
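/*
 * Sketch of a typical caller on the networking side, where sk->sk_memcg
 * is set up by mem_cgroup_sk_alloc():
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;	// treat protocol memory as under pressure
 */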
0a432dcb | 1596 | |
e4262c4f YS |
1597 | int alloc_shrinker_info(struct mem_cgroup *memcg); |
1598 | void free_shrinker_info(struct mem_cgroup *memcg); | |
2bfd3637 | 1599 | void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); |
a178015c | 1600 | void reparent_shrinker_deferred(struct mem_cgroup *memcg); |
e805605c | 1601 | #else |
80e95fe0 | 1602 | #define mem_cgroup_sockets_enabled 0 |
2d758073 JW |
1603 | static inline void mem_cgroup_sk_alloc(struct sock *sk) { } | |
1604 | static inline void mem_cgroup_sk_free(struct sock *sk) { } | |
baac50bb | 1605 | static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) |
e805605c JW |
1606 | { |
1607 | return false; | |
1608 | } | |
0a432dcb | 1609 | |
2bfd3637 YS |
1610 | static inline void set_shrinker_bit(struct mem_cgroup *memcg, |
1611 | int nid, int shrinker_id) | |
0a432dcb YS |
1612 | { |
1613 | } | |
e805605c | 1614 | #endif |
7ae1e1d0 | 1615 | |
9b6f7e16 | 1616 | #ifdef CONFIG_MEMCG_KMEM |
f4b00eab RG |
1617 | int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); |
1618 | void __memcg_kmem_uncharge_page(struct page *page, int order); | |
45264778 | 1619 | |
bf4f0599 RG |
1620 | struct obj_cgroup *get_obj_cgroup_from_current(void); |
1621 | ||
1622 | int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); | |
1623 | void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); | |
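/*
 * Illustrative byte-charging sequence (error handling elided; assumes
 * the obj_cgroup_put() helper defined elsewhere in this header):
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && !obj_cgroup_charge(objcg, gfp, size)) {
 *		// use the accounted allocation, then release the charge
 *		obj_cgroup_uncharge(objcg, size);
 *	}
 *	if (objcg)
 *		obj_cgroup_put(objcg);
 */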
1624 | ||
ef12947c | 1625 | extern struct static_key_false memcg_kmem_enabled_key; |
749c5415 | 1626 | |
dbcf73e2 | 1627 | extern int memcg_nr_cache_ids; |
64219994 MH |
1628 | void memcg_get_cache_ids(void); |
1629 | void memcg_put_cache_ids(void); | |
ebe945c2 GC |
1630 | |
1631 | /* | |
1632 | * Helper macro to loop through all memcg-specific caches. Callers must still | |
1633 | * check that the cache is valid (it is either valid or NULL). | |
1634 | * The slab_mutex must be held while looping through these caches. | |
1635 | */ | |
749c5415 | 1636 | #define for_each_memcg_cache_index(_idx) \ |
dbcf73e2 | 1637 | for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++) |
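/*
 * Usage sketch; slab_mutex and the per-index cache lookup are private to
 * the slab allocator, so cache_from_idx() below is hypothetical:
 *
 *	int i;
 *
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_idx(s, i);	// hypothetical
 *		if (!c)
 *			continue;
 *		// operate on the per-memcg cache
 *	}
 */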
749c5415 | 1638 | |
7ae1e1d0 GC |
1639 | static inline bool memcg_kmem_enabled(void) |
1640 | { | |
eda330e5 | 1641 | return static_branch_likely(&memcg_kmem_enabled_key); |
7ae1e1d0 GC |
1642 | } |
1643 | ||
f4b00eab RG |
1644 | static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, |
1645 | int order) | |
60cd4bcd SB |
1646 | { |
1647 | if (memcg_kmem_enabled()) | |
f4b00eab | 1648 | return __memcg_kmem_charge_page(page, gfp, order); |
60cd4bcd SB |
1649 | return 0; |
1650 | } | |
1651 | ||
f4b00eab | 1652 | static inline void memcg_kmem_uncharge_page(struct page *page, int order) |
60cd4bcd SB |
1653 | { |
1654 | if (memcg_kmem_enabled()) | |
f4b00eab | 1655 | __memcg_kmem_uncharge_page(page, order); |
60cd4bcd SB |
1656 | } |
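/*
 * Sketch of a manual charge/uncharge pairing; the page allocator performs
 * the equivalent steps internally for __GFP_ACCOUNT allocations:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	// ... use the page, then on the free path:
 *	memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */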
1657 | ||
33398cf2 | 1658 | /* |
a7cb874b RG |
1659 | * A helper for accessing a memcg's kmemcg_id; it is used to look up the | |
1660 | * corresponding per-memcg LRU lists. | |
33398cf2 MH |
1661 | */ |
1662 | static inline int memcg_cache_id(struct mem_cgroup *memcg) | |
1663 | { | |
1664 | return memcg ? memcg->kmemcg_id : -1; | |
1665 | } | |
5722d094 | 1666 | |
8380ce47 RG |
1667 | struct mem_cgroup *mem_cgroup_from_obj(void *p); |
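/*
 * Illustrative only: mem_cgroup_from_obj() resolves a kernel pointer
 * (e.g. a slab object) to the memcg it is accounted to:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_from_obj(p);
 *
 *	// NULL means the object is unaccounted (or root)
 */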
1668 | ||
7ae1e1d0 | 1669 | #else |
9b6f7e16 | 1670 | |
f4b00eab RG |
1671 | static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, |
1672 | int order) | |
9b6f7e16 RG |
1673 | { |
1674 | return 0; | |
1675 | } | |
1676 | ||
f4b00eab | 1677 | static inline void memcg_kmem_uncharge_page(struct page *page, int order) |
9b6f7e16 RG |
1678 | { |
1679 | } | |
1680 | ||
f4b00eab RG |
1681 | static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, |
1682 | int order) | |
60cd4bcd SB |
1683 | { |
1684 | return 0; | |
1685 | } | |
1686 | ||
f4b00eab | 1687 | static inline void __memcg_kmem_uncharge_page(struct page *page, int order) |
60cd4bcd SB |
1688 | { |
1689 | } | |
1690 | ||
749c5415 GC |
1691 | #define for_each_memcg_cache_index(_idx) \ |
1692 | for (; NULL; ) | |
1693 | ||
b9ce5ef4 GC |
1694 | static inline bool memcg_kmem_enabled(void) |
1695 | { | |
1696 | return false; | |
1697 | } | |
1698 | ||
2633d7a0 GC |
1699 | static inline int memcg_cache_id(struct mem_cgroup *memcg) |
1700 | { | |
1701 | return -1; | |
1702 | } | |
1703 | ||
05257a1a VD |
1704 | static inline void memcg_get_cache_ids(void) |
1705 | { | |
1706 | } | |
1707 | ||
1708 | static inline void memcg_put_cache_ids(void) | |
1709 | { | |
1710 | } | |
1711 | ||
8380ce47 RG |
1712 | static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) |
1713 | { | |
1714 | return NULL; | |
1715 | } | |
1716 | ||
84c07d11 | 1717 | #endif /* CONFIG_MEMCG_KMEM */ |
127424c8 | 1718 | |
8cdea7c0 | 1719 | #endif /* _LINUX_MEMCONTROL_H */ |