/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names array is defined in
 * mm/memcontrol.c; the two lists must be kept in sync.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_RSS_HUGE,       /* # of pages charged as anon huge */
        MEM_CGROUP_STAT_FILE_MAPPED,    /* # of pages charged as file rss */
        MEM_CGROUP_STAT_DIRTY,          /* # of dirty pages in page cache */
        MEM_CGROUP_STAT_WRITEBACK,      /* # of pages under writeback */
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
        /* default hierarchy stats */
        MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
        MEMCG_SLAB_RECLAIMABLE,
        MEMCG_SLAB_UNRECLAIMABLE,
        MEMCG_SOCK,
        MEMCG_NR_STAT,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        int priority;
        unsigned int generation;
};

enum mem_cgroup_events_index {
        MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
        MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
        MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
        MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
        MEM_CGROUP_EVENTS_NSTATS,
        /* default hierarchy events */
        MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_NR_EVENTS,
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_TARGET_NUMAINFO,
        MEM_CGROUP_NTARGETS,
};
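
/*
 * Hedged sketch of how these targets are consumed: mm/memcontrol.c keeps a
 * per-cpu event count and a per-target threshold, and fires the periodic
 * work once the counter passes the target (names follow that file, but the
 * details here are illustrative only):
 *
 *      val  = __this_cpu_read(memcg->stat->nr_page_events);
 *      next = __this_cpu_read(memcg->stat->targets[target]);
 *      if ((long)next - (long)val < 0) {
 *              -- recompute next from val and the per-target spacing,
 *              -- then run the threshold/softlimit/numainfo work
 *              __this_cpu_write(memcg->stat->targets[target], next);
 *              return true;
 *      }
 *      return false;
 */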

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT     16
#define MEM_CGROUP_ID_MAX       USHRT_MAX

struct mem_cgroup_id {
        int id;
        atomic_t ref;
};

struct mem_cgroup_stat_cpu {
        long count[MEMCG_NR_STAT];
        unsigned long events[MEMCG_NR_EVENTS];
        unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec lruvec;
        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which */
                                        /* the soft limit is exceeded */
        bool on_tree;
        struct mem_cgroup *memcg;       /* Back pointer, we cannot */
                                        /* use container_of */
};

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};
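
/*
 * Illustrative sketch, not the canonical implementation: on a usage change,
 * mm/memcontrol.c signals every eventfd whose threshold lies between the
 * cached current_threshold and the new usage, roughly:
 *
 *      i = t->current_threshold;
 *      for (; i >= 0 && t->entries[i].threshold > usage; i--)
 *              eventfd_signal(t->entries[i].eventfd, 1);       -- usage fell
 *      for (i++; i < t->size && t->entries[i].threshold <= usage; i++)
 *              eventfd_signal(t->entries[i].eventfd, 1);       -- usage rose
 *      t->current_threshold = i - 1;
 *
 * Since entries[] is a flexible array member, an array of @size thresholds
 * is allocated in one chunk:
 *
 *      t = kmalloc(sizeof(*t) + size * sizeof(t->entries[0]), GFP_KERNEL);
 */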

enum memcg_kmem_state {
        KMEM_NONE,
        KMEM_ALLOCATED,
        KMEM_ONLINE,
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;
        struct page_counter swap;

        /* Legacy consumer-oriented counters */
        struct page_counter memsw;
        struct page_counter kmem;
        struct page_counter tcpmem;

        /* Normal memory consumption range */
        unsigned long low;
        unsigned long high;

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        int swappiness;
        /* OOM-Killer disable */
        int oom_kill_disable;

        /* handle for "memory.events" */
        struct cgroup_file events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /*
         * Set > 0 if pages under this cgroup are moving to another cgroup.
         */
        atomic_t moving_account;
        /* taken only while moving_account > 0 */
        spinlock_t move_lock;
        struct task_struct *move_lock_task;
        unsigned long move_lock_flags;
        /*
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu __percpu *stat;

        unsigned long socket_pressure;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

#ifndef CONFIG_SLOB
        /* Index in the kmem_cache->memcg_params.memcg_caches array */
        int kmemcg_id;
        enum memcg_kmem_state kmem_state;
#endif

        int last_scanned_node;
#if MAX_NUMNODES > 1
        nodemask_t scan_nodes;
        atomic_t numainfo_events;
        atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
#endif

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
};
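
/*
 * nodeinfo[] is a flexible array member indexed by node id, which is why it
 * must stay last: the allocation in mm/memcontrol.c reserves room for every
 * possible node in one shot, along the lines of this hedged sketch:
 *
 *      size = sizeof(struct mem_cgroup);
 *      size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
 *      memcg = kzalloc(size, GFP_KERNEL);
 *
 * Any member placed after nodeinfo[] would overlap that per-node storage.
 */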

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

/**
 * mem_cgroup_events - count memory events against a cgroup
 * @memcg: the memory cgroup
 * @idx: the event index
 * @nr: the number of events to account for
 */
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
                                     enum mem_cgroup_events_index idx,
                                     unsigned int nr)
{
        this_cpu_add(memcg->stat->events[idx], nr);
        cgroup_file_notify(&memcg->events_file);
}

bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                          gfp_t gfp_mask, struct mem_cgroup **memcgp,
                          bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
                              bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
                              bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
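
/*
 * The charge calls above form a two-phase protocol. A fault path might
 * drive it as in this hedged sketch (out_error is a hypothetical label;
 * real call sites differ in detail):
 *
 *      struct mem_cgroup *memcg;
 *
 *      if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *              goto out_error;                 -- reservation failed
 *      ...                                     -- map page, add to LRU
 *      mem_cgroup_commit_charge(page, memcg, false, false);
 *
 * and if the mapping step fails instead:
 *
 *      mem_cgroup_cancel_charge(page, memcg, false);
 */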

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
        return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node, or for a memcg
 * on that node
 * @pgdat: pglist_data of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat, or a given
 * @memcg and @pgdat. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
                                               struct mem_cgroup *memcg)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = node_lruvec(pgdat);
                goto out;
        }

        mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

#define mem_cgroup_from_counter(counter, member)        \
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
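
/*
 * The usual iteration idiom over a hierarchy (a sketch; reclaimers also
 * pass a struct mem_cgroup_reclaim_cookie to share one walk between
 * concurrent reclaimers):
 *
 *      memcg = mem_cgroup_iter(root, NULL, NULL);
 *      do {
 *              ...
 *      } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * Leaving the loop early requires mem_cgroup_iter_break(root, memcg) to
 * drop the reference held on the current position.
 */
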
int mem_cgroup_scan_tasks(struct mem_cgroup *,
                          int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        if (!memcg->memory.parent)
                return NULL;
        return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                                            struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        if (!root->use_hierarchy)
                return false;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                           int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        struct mem_cgroup_per_node *mz;
        unsigned long nr_pages = 0;
        int zid;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        for (zid = 0; zid < MAX_NR_ZONES; zid++)
                nr_pages += mz->lru_zone_size[zid][lru];
        return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                               struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
        WARN_ON(current->memcg_may_oom);
        current->memcg_may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
        WARN_ON(!current->memcg_may_oom);
        current->memcg_may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
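
/*
 * Hedged sketch of how the fault path is expected to bracket a userspace
 * fault with these helpers (compare handle_mm_fault(); details may vary):
 *
 *      if (flags & FAULT_FLAG_USER)
 *              mem_cgroup_oom_enable();
 *      ret = __handle_mm_fault(vma, address, flags);
 *      if (flags & FAULT_FLAG_USER) {
 *              mem_cgroup_oom_disable();
 *              if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *                      mem_cgroup_oom_synchronize(false);
 *      }
 */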

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

/**
 * mem_cgroup_update_page_stat - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mem_cgroup_update_page_stat(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 */
static inline void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_stat_index idx, int val)
{
        VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));

        if (page->mem_cgroup)
                this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (unlikely(!memcg))
                goto out;

        switch (idx) {
        case PGFAULT:
                this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
                break;
        case PGMAJFAULT:
                this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
                break;
        default:
                BUG();
        }
out:
        rcu_read_unlock();
}
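
/*
 * For example, a file fault that had to go to disk would account a major
 * fault both globally and to the faulting mm's memcg (sketch, mirroring
 * the filemap fault path):
 *
 *      count_vm_event(PGMAJFAULT);
 *      mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 */
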
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT     0
#define MEM_CGROUP_ID_MAX       0

struct mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void mem_cgroup_events(struct mem_cgroup *memcg,
                                     enum mem_cgroup_events_index idx,
                                     unsigned int nr)
{
}

static inline bool mem_cgroup_low(struct mem_cgroup *root,
                                  struct mem_cgroup *memcg)
{
        return false;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask,
                                        struct mem_cgroup **memcgp,
                                        bool compound)
{
        *memcgp = NULL;
        return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
                                               struct mem_cgroup *memcg)
{
        return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct pglist_data *pgdat)
{
        return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
                                      const struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
        return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                             int nid, unsigned int lru_mask)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline void mem_cgroup_update_page_stat(struct page *page,
                                               enum mem_cgroup_stat_index idx,
                                               int nr)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                         unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                                       unsigned long *pfilepages,
                                       unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
        do {
                if (time_before(jiffies, memcg->socket_pressure))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}
#endif
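
/*
 * Hedged sketch of the expected call pattern on the socket side (compare
 * the sk_mem accounting in net/core/sock.c; "amt" is a page count):
 *
 *      if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *          !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
 *              goto suppress_allocation;
 *
 * with a matching mem_cgroup_uncharge_skmem(sk->sk_memcg, amt) when the
 * memory is returned.
 */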

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
                            struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);
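
/*
 * Sketch of how the page allocator is expected to apply these hooks to
 * __GFP_ACCOUNT allocations (illustrative; the real call site differs):
 *
 *      page = alloc_pages(gfp, order);
 *      if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
 *          unlikely(memcg_kmem_charge(page, gfp, order) != 0)) {
 *              __free_pages(page, order);
 *              page = NULL;
 *      }
 *
 * memcg_kmem_uncharge() is then called from the page freeing path.
 */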

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

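/*
 * Illustrative use; cache_from_memcg_idx() lives in mm/slab.h, and this is
 * only a sketch of the expected pattern:
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache_index(i) {
 *              struct kmem_cache *c = cache_from_memcg_idx(s, i);
 *              if (!c)
 *                      continue;
 *              ...
 *      }
 *      mutex_unlock(&slab_mutex);
 */
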
static inline bool memcg_kmem_enabled(void)
{
        return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * Helper for accessing a memcg's kmem index. It will be used as an index in
 * the child cache array in kmem_cache, and also to derive its name. This
 * function returns -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}

/**
 * memcg_kmem_update_page_stat - update kmem page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 */
static inline void memcg_kmem_update_page_stat(struct page *page,
                                enum mem_cgroup_stat_index idx, int val)
{
        if (memcg_kmem_enabled() && page->mem_cgroup)
                this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}

#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_kmem_update_page_stat(struct page *page,
                                enum mem_cgroup_stat_index idx, int val)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#endif /* _LINUX_MEMCONTROL_H */