/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

/* Cgroup-specific events, on top of universal VM events */
enum memcg_event_item {
	MEMCG_LOW = NR_VM_EVENT_ITEMS,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_NR_EVENTS,
};
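/*
 * Note: the memcg-specific stat and event items above are appended after the
 * generic NR_VM_NODE_STAT_ITEMS and NR_VM_EVENT_ITEMS index spaces, so the
 * per-cpu count[]/events[] arrays in struct mem_cgroup_stat_cpu below can be
 * indexed with either the generic or the memcg-specific enum values.
 */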
58 | ||
59 | struct mem_cgroup_reclaim_cookie { | |
60 | pg_data_t *pgdat; | |
61 | int priority; | |
62 | unsigned int generation; | |
63 | }; | |
64 | ||
65 | #ifdef CONFIG_MEMCG | |
66 | ||
67 | #define MEM_CGROUP_ID_SHIFT 16 | |
68 | #define MEM_CGROUP_ID_MAX USHRT_MAX | |
69 | ||
70 | struct mem_cgroup_id { | |
71 | int id; | |
72 | atomic_t ref; | |
73 | }; | |
74 | ||
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events, which is more straightforward than using
 * jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[MEMCG_NR_EVENTS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;
	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Normal memory consumption range */
	unsigned long low;
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* handle for "memory.events" */
	struct cgroup_file events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * Set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	struct task_struct *move_lock_task;
	unsigned long move_lock_flags;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifndef CONFIG_SLOB
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t scan_nodes;
	atomic_t numainfo_events;
	atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_event(struct mem_cgroup *memcg,
				    enum memcg_event_item event)
{
	this_cpu_inc(memcg->stat->events[event]);
	cgroup_file_notify(&memcg->events_file);
}

bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
 * @pgdat: pglist_data of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages of @memcg on the node described
 * by @pgdat. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
					       struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
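/*
 * Example (illustrative sketch only): a reclaim-style caller would typically
 * resolve the lruvec for a memcg/node pair and then query its LRU sizes:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *	unsigned long anon = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
 *
 * The variable names above are hypothetical; see mm/vmscan.c for real users.
 */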
328 | ||
329 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); | |
330 | ||
331 | bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); | |
332 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | |
333 | ||
334 | static inline | |
335 | struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ | |
336 | return css ? container_of(css, struct mem_cgroup, css) : NULL; | |
337 | } | |
338 | ||
339 | #define mem_cgroup_from_counter(counter, member) \ | |
340 | container_of(counter, struct mem_cgroup, member) | |
341 | ||
342 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, | |
343 | struct mem_cgroup *, | |
344 | struct mem_cgroup_reclaim_cookie *); | |
345 | void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); | |
346 | int mem_cgroup_scan_tasks(struct mem_cgroup *, | |
347 | int (*)(struct task_struct *, void *), void *); | |
348 | ||
349 | static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) | |
350 | { | |
351 | if (mem_cgroup_disabled()) | |
352 | return 0; | |
353 | ||
354 | return memcg->id.id; | |
355 | } | |
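/*
 * mem_cgroup_from_id() is the reverse lookup: it maps an ID previously
 * returned by mem_cgroup_id() back to the memcg it identifies.
 */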
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
			       struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_may_oom);
	current->memcg_may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_may_oom);
	current->memcg_may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     enum memcg_stat_item idx)
{
	long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);

	if (val < 0)
		val = 0;

	return val;
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx, int val)
{
	if (!mem_cgroup_disabled())
		this_cpu_add(memcg->stat->count[idx], val);
}

static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx)
{
	mod_memcg_state(memcg, idx, 1);
}

static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void inc_memcg_page_state(struct page *page,
					enum memcg_stat_item idx)
{
	mod_memcg_page_state(page, idx, 1);
}

static inline void dec_memcg_page_state(struct page *page,
					enum memcg_stat_item idx)
{
	mod_memcg_page_state(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	if (!mem_cgroup_disabled())
		this_cpu_add(memcg->stat->events[idx], count);
}

static inline void count_memcg_page_event(struct page *page,
					  enum memcg_stat_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg)) {
		this_cpu_inc(memcg->stat->events[idx]);
		if (idx == OOM_KILL)
			cgroup_file_notify(&memcg->events_file);
	}
	rcu_read_unlock();
}
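/*
 * Example (illustrative sketch only): a fault path that wants an event
 * attributed to the faulting mm's memcg could do:
 *
 *	count_memcg_event_mm(vma->vm_mm, PGFAULT);
 *
 * Any other vm_event_item works the same way; OOM_KILL additionally notifies
 * "memory.events" watchers, as shown above.
 */
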
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void mem_cgroup_event(struct mem_cgroup *memcg,
				    enum memcg_event_item event)
{
}

static inline bool mem_cgroup_low(struct mem_cgroup *root,
				  struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
					       struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     enum memcg_stat_item idx)
{
	return 0;
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx,
				   int nr)
{
}

static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx)
{
}

static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx)
{
}

static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx,
					int nr)
{
}

static inline void inc_memcg_page_state(struct page *page,
					enum memcg_stat_item idx)
{
}

static inline void dec_memcg_page_state(struct page *page,
					enum memcg_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  enum memcg_stat_item idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
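/*
 * mem_cgroup_under_socket_pressure - should socket memory allocations be
 * throttled?
 *
 * On the legacy (cgroup1) hierarchy this reports the tcpmem pressure state;
 * on the default hierarchy it checks whether this memcg or any of its
 * ancestors is still within its socket_pressure time window.
 */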
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check whether the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held while looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
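/*
 * Example (illustrative sketch only): iterate the per-memcg cache slots of a
 * root cache "s", skipping empty ones. lookup_memcg_cache() below is a
 * hypothetical accessor standing in for however the caller resolves an index
 * to its child cache (the index -> cache mapping lives in
 * kmem_cache->memcg_params):
 *
 *	int i;
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache_index(i) {
 *		c = lookup_memcg_cache(s, i);	// hypothetical helper
 *		if (!c)
 *			continue;
 *		// ... operate on the child cache ...
 *	}
 */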
916 | ||
917 | static inline bool memcg_kmem_enabled(void) | |
918 | { | |
919 | return static_branch_unlikely(&memcg_kmem_enabled_key); | |
920 | } | |
921 | ||
922 | /* | |
923 | * helper for accessing a memcg's index. It will be used as an index in the | |
924 | * child cache array in kmem_cache, and also to derive its name. This function | |
925 | * will return -1 when this is not a kmem-limited memcg. | |
926 | */ | |
927 | static inline int memcg_cache_id(struct mem_cgroup *memcg) | |
928 | { | |
929 | return memcg ? memcg->kmemcg_id : -1; | |
930 | } | |
931 | ||
932 | #else | |
933 | #define for_each_memcg_cache_index(_idx) \ | |
934 | for (; NULL; ) | |
935 | ||
936 | static inline bool memcg_kmem_enabled(void) | |
937 | { | |
938 | return false; | |
939 | } | |
940 | ||
941 | static inline int memcg_cache_id(struct mem_cgroup *memcg) | |
942 | { | |
943 | return -1; | |
944 | } | |
945 | ||
946 | static inline void memcg_get_cache_ids(void) | |
947 | { | |
948 | } | |
949 | ||
950 | static inline void memcg_put_cache_ids(void) | |
951 | { | |
952 | } | |
953 | ||
954 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ | |
955 | ||
956 | #endif /* _LINUX_MEMCONTROL_H */ |