/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
/*
 * The corresponding mem_cgroup_stat_names array is defined in
 * mm/memcontrol.c; the two lists must be kept in sync.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_DIRTY,		/* # of dirty pages in page cache */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};
struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};
enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
	/* default hierarchy events */
	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_NR_EVENTS,
};
/*
 * The per-memcg event counter is incremented on every pagein/pageout;
 * with THP it is incremented by the number of pages. The counter is used
 * to trigger periodic per-memcg events, which is simpler and cheaper
 * than tracking jiffies etc. for the same purpose.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
struct cg_proto {
	struct page_counter	memory_allocated;	/* Current allocated memory. */
	int			memory_pressure;
	bool			active;
};

#ifdef CONFIG_MEMCG
struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEMCG_NR_EVENTS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded */
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to the threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * The memory controller data structure. The memory controller controls
 * both page cache and RSS per cgroup. We would eventually like to
 * provide statistics based on the statistics developed by Rik van Riel
 * for clock-pro, to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter memsw;
	struct page_counter kmem;

	/* Normal memory consumption range */
	unsigned long low;
	unsigned long high;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/* css_online() has been completed */
	int initialized;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* handle for "memory.events" */
	struct cgroup_file events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into
	 * this mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * Set > 0 if pages under this cgroup are moving to other cgroups.
	 */
	atomic_t		moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	struct task_struct	*move_lock_task;
	unsigned long		move_lock_flags;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	bool kmem_acct_activated;
	bool kmem_acct_active;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

extern struct mem_cgroup *root_mem_cgroup;

/**
 * mem_cgroup_events - count memory events against a cgroup
 * @memcg: the memory cgroup
 * @idx: the event index
 * @nr: the number of events to account for
 */
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
				     enum mem_cgroup_events_index idx,
				     unsigned int nr)
{
	this_cpu_add(memcg->stat->events[idx], nr);
	cgroup_file_notify(&memcg->events_file);
}
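/*
 * Illustrative caller (a sketch; the charge and reclaim paths count the
 * default-hierarchy events in this way, e.g. on hitting the hard limit):
 *
 *	mem_cgroup_events(memcg, MEMCG_MAX, 1);
 *
 * The bump also wakes anybody polling "memory.events" via
 * cgroup_file_notify().
 */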

bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
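
/*
 * The charge protocol, as a sketch modelled on callers like the page
 * cache and anonymous fault paths (insert_the_page() is a hypothetical
 * stand-in for whatever makes the page visible):
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg))
 *		return -ENOMEM;
 *	err = insert_the_page(page);
 *	if (err) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return err;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 */
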
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

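/*
 * Canonical hierarchy walk, as a sketch (compare the reclaim loop in
 * mm/vmscan.c); visit_memcg() is a hypothetical stand-in for the
 * per-memcg work. Break out with mem_cgroup_iter_break() so the
 * iterator can drop its reference on the current position:
 *
 *	memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *	do {
 *		if (visit_memcg(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 */
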
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages);

static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return true;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	memcg = mz->memcg;

	return !!(memcg->css.flags & CSS_ONLINE);
}

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	unsigned long inactive_ratio;
	unsigned long inactive;
	unsigned long active;
	unsigned long gb;

	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}

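/*
 * The inactive_ratio above grows with the square root of the anon
 * workingset size in gigabytes; approximate worked values:
 *
 *	anon total	inactive_ratio
 *	1 GB		3
 *	10 GB		10
 *	100 GB		31
 *	1 TB		~100
 *
 * so the inactive list is considered "low" once inactive * ratio drops
 * below active.
 */
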
void mem_cgroup_handle_over_high(void);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
			       struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_may_oom);
	current->memcg_may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_may_oom);
	current->memcg_may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);

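/*
 * These hooks bracket a userspace fault, in the style of
 * handle_mm_fault() (a sketch, not the verbatim caller):
 *
 *	mem_cgroup_oom_enable();
 *	ret = __handle_mm_fault(mm, vma, address, flags);
 *	mem_cgroup_oom_disable();
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 *
 * A charge that hits the hard limit during the fault only records the
 * OOM situation; the kill, if any, happens in the synchronize step.
 */
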
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);

/**
 * mem_cgroup_update_page_stat - update page state statistics
 * @memcg: memcg to account against
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * See mem_cgroup_begin_page_stat() for locking requirements.
 */
static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx, int val)
{
	VM_BUG_ON(!rcu_read_lock_held());

	if (memcg)
		this_cpu_add(memcg->stat->count[idx], val);
}

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, -1);
}

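/*
 * Typical pattern, sketched after the rmap code: pin the page's memcg
 * association, bump the statistic, then unpin:
 *
 *	memcg = mem_cgroup_begin_page_stat(page);
 *	mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_page_stat(memcg);
 */
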
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline void mem_cgroup_events(struct mem_cgroup *memcg,
				     enum mem_cgroup_events_index idx,
				     unsigned int nr)
{
}

static inline bool mem_cgroup_low(struct mem_cgroup *root,
				  struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline bool
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return true;
}

static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
extern struct static_key memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_key_false(&memcg_sockets_enabled_key)
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return memcg->tcp_mem.memory_pressure;
}
#else
#define mem_cgroup_sockets_enabled 0
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must
 * still check that the cache is valid (it is either valid or NULL), and
 * the slab_mutex must be held while looping over the caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

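/*
 * Sketch of a walk over the per-memcg children of a root cache, after
 * the pattern in mm/slab_common.c (cache_from_memcg_idx() lives in
 * mm/slab.h and is shown here only for illustration):
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(s, i);
 *		if (!c)
 *			continue;
 *		...
 *	}
 *	mutex_unlock(&slab_mutex);
 */
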
static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return memcg->kmem_acct_active;
}

/*
 * In general, we'll do everything in our power not to incur any overhead
 * for non-memcg users of the kmem functions -- not even a function call,
 * if we can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case,
 * we'll see that kmemcg is off for everybody and proceed quickly. If it
 * is on, we'll still do most of the flag checking inline. We check a lot
 * of conditions, but because they are pretty simple, they are expected
 * to be fast.
 */
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			      struct mem_cgroup *memcg);
int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);

/*
 * Helper for accessing a memcg's index. It will be used as an index in
 * the child cache array in kmem_cache, and also to derive the cache's
 * name. This function will return -1 when this is not a kmem-limited
 * memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);

static inline bool __memcg_kmem_bypass(void)
{
	if (!memcg_kmem_enabled())
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;
	return false;
}

/**
 * memcg_kmem_charge: charge a kmem page
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
static __always_inline int memcg_kmem_charge(struct page *page,
					     gfp_t gfp, int order)
{
	if (__memcg_kmem_bypass())
		return 0;
	if (!(gfp & __GFP_ACCOUNT))
		return 0;
	return __memcg_kmem_charge(page, gfp, order);
}

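/*
 * Because the charge is keyed off __GFP_ACCOUNT, callers opt in simply
 * by passing the flag (a sketch; the allocator's kmem path is assumed
 * to invoke memcg_kmem_charge()/memcg_kmem_uncharge() on the caller's
 * behalf, so the caller never charges by hand):
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 */
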
/**
 * memcg_kmem_uncharge: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge(page, order);
}

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 *
 * All memory allocated from a per-memcg cache is charged to the owner
 * memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (__memcg_kmem_bypass())
		return cachep;
	return __memcg_kmem_get_cache(cachep, gfp);
}

static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_put_cache(cachep);
}
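
/*
 * The slab hot path pairs these around an allocation; a sketch in the
 * style of the slab pre/post allocation hooks (do_the_allocation() is
 * a hypothetical stand-in for the real allocation step):
 *
 *	s = memcg_kmem_get_cache(s, gfpflags);
 *	objp = do_the_allocation(s, gfpflags);
 *	memcg_kmem_put_cache(s);
 */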
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return false;
}

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */