/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout; with
 * THP it is incremented by the number of pages. The counter is used to
 * trigger periodic events, which is simpler and cheaper than using jiffies
 * etc. for the same purpose.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	long stat[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

struct batched_lruvec_stat {
	s32 count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[];
};

/*
 * Per-node information in the memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	/*
	 * Legacy local VM stats. This should remain struct lruvec_stat and
	 * cannot be optimized to struct batched_lruvec_stat: the threshold
	 * of lruvec_stat_cpu is bounded by MEMCG_CHARGE_BATCH * PAGE_SIZE,
	 * which fits into an s32, but this field has no upper limit.
	 */
	struct lruvec_stat __percpu *lruvec_stat_local;

	/* Subtree VM stats (batched updates) */
	struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct memcg_shrinker_map __rcu *shrinker_map;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it has to kill one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	atomic_long_t vmstats[MEMCG_NR_STAT];
	atomic_long_t vmevents[NR_VM_EVENT_ITEMS];

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	/* Legacy local VM stats and events */
	struct memcg_vmstats_percpu __percpu *vmstats_local;

	/* Subtree VM stats and events (batched updates) */
	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: big machines may need a larger batch.
 */
#define MEMCG_CHARGE_BATCH 32U

#ifdef CONFIG_MEMCG

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
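
/*
 * Illustrative sketch (not part of this header's API): the low bits of
 * page->memcg_data carry the flags above and the remaining bits carry a
 * pointer. Assuming a page charged as a non-slab kernel page, the word
 * can be unpacked by hand like this:
 *
 *	unsigned long memcg_data = READ_ONCE(page->memcg_data);
 *	bool kmem = memcg_data & MEMCG_DATA_KMEM;
 *	struct mem_cgroup *memcg =
 *		(struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 *
 * which is exactly what page_memcg() and PageMemcgKmem() below do.
 */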

/*
 * page_memcg - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of pages, e.g. slab pages or ex-slab pages.
 *
 * Any of the following ensures page and memcg binding stability:
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 */
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
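
/*
 * Illustrative usage sketch (not part of this header's API): with the
 * page lock held, the page/memcg binding is stable, so the memcg can be
 * read directly:
 *
 *	lock_page(page);
 *	memcg = page_memcg(page);	// may be NULL
 *	if (memcg)
 *		pr_debug("page belongs to memcg id %u\n",
 *			 mem_cgroup_id(memcg));
 *	unlock_page(page);
 */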

/*
 * page_memcg_rcu - locklessly get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of pages, e.g. slab pages or ex-slab pages.
 */
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);
	WARN_ON_ONCE(!rcu_read_lock_held());

	return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) &
				     ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function, unlike page_memcg(), can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector.
 *
 * Any of the following ensures page and memcg binding stability:
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * PageMemcgKmem - check if the page has MemcgKmem flag set
 * @page: a pointer to the page struct
 *
 * Checks if the page has MemcgKmem flag set. The caller must ensure
 * that the page has an associated memory cgroup. It's not safe to call
 * this function against some types of pages, e.g. slab pages.
 */
static inline bool PageMemcgKmem(struct page *page)
{
	VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
	return page->memcg_data & MEMCG_DATA_KMEM;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages which might have an associated memory cgroup, e.g.
 * kernel stack pages.
 */
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function is safe to use if the page can be directly
 * associated with a memory cgroup.
 */
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}
#endif

static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
	if (idx == MEMCG_PERCPU_B)
		return true;
	return vmstat_item_in_bytes(idx);
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static inline struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/*
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
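
/*
 * Illustrative usage sketch (not part of this header's API): reclaim
 * code typically resolves the lruvec for a (memcg, node) pair and then
 * inspects or walks its LRU lists:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 *	unsigned long active = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
 *
 * Passing a NULL @memcg yields the root_mem_cgroup's lruvec.
 */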

/*
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page->mem_cgroup being stable.
 */
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	struct mem_cgroup *memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
	return mem_cgroup_lruvec(memcg, pgdat);
}

static inline bool lruvec_holds_page_lru_lock(struct page *page,
					      struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);
	const struct mem_cgroup *memcg;
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return lruvec == &pgdat->__lruvec;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = page_memcg(page) ? : root_mem_cgroup;

	return lruvec->pgdat == pgdat && mz->memcg == memcg;
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
					unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
#else
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}
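
/*
 * Illustrative sketch (not part of this header's API): a typical caller
 * pins an obj_cgroup across a byte-sized charge, then drops its
 * reference:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *			;	// success: "size" bytes accounted to objcg
 *		obj_cgroup_put(objcg);
 *	}
 *
 * get_obj_cgroup_from_current() and obj_cgroup_charge() are declared
 * below under CONFIG_MEMCG_KMEM.
 */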

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);
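
/*
 * Illustrative sketch (not part of this header's API): mem_cgroup_iter()
 * is used as a pre-order hierarchy walk, with mem_cgroup_iter_break()
 * for early exits so css references are dropped:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {	// hypothetical predicate
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */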

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/*
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
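
/*
 * Illustrative sketch (not part of this header's API): lock_page_memcg()
 * stabilizes the page/memcg binding while page state that is about to be
 * reflected in memcg statistics is updated:
 *
 *	struct mem_cgroup *memcg = lock_page_memcg(page);
 *
 *	// ...modify page state and/or memcg counters...
 *	__unlock_page_memcg(memcg);	// or unlock_page_memcg(page)
 */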

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
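
/*
 * Illustrative sketch (not part of this header's API): callers record a
 * memory event on the cgroup it happened in, e.g. when a swap charge
 * fails:
 *
 *	memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
 *
 * The event is counted locally, propagated up the hierarchy, and the
 * matching memory.events file(s) are notified.
 */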

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	return 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline bool lruvec_holds_page_lru_lock(struct page *page,
					      struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);

	return lruvec == &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *lock_page_lruvec(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
						      unsigned long *flagsp)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Don't lock again iff page's lruvec is already locked */
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return lock_page_lruvec_irq(page);
}

/* Don't lock again iff page's lruvec is already locked */
static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return lock_page_lruvec_irqsave(page, flags);
}
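
/*
 * Illustrative sketch (not part of this header's API): batch page
 * walkers use the relock helpers to keep the lru_lock held across
 * consecutive pages from the same lruvec and only re-take it on a
 * lruvec change:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(page, pages, lru) {	// hypothetical page list
 *		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 *		// ...operate on page under lruvec->lru_lock...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */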

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);

#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages);
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
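
/*
 * Illustrative usage sketch (not part of this header's API), assuming
 * the slab_mutex is held and "caches" is a hypothetical per-memcg cache
 * array indexed by cache id:
 *
 *	int idx;
 *
 *	for_each_memcg_cache_index(idx) {
 *		struct kmem_cache *c = caches[idx];
 *
 *		if (!c)		// slots may be NULL; callers must check
 *			continue;
 *		// ...operate on c...
 *	}
 */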

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}
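
/*
 * Illustrative sketch (not part of this header's API): a kernel
 * allocation charged to a memcg pairs the two helpers around the page's
 * lifetime (in practice this is usually driven by __GFP_ACCOUNT):
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);	// charge failed
 *		page = NULL;
 *	}
 *	// ...use the page...
 *	memcg_kmem_uncharge_page(page, order);	// before freeing
 *	__free_pages(page, order);
 */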

/*
 * A helper for accessing memcg's kmemcg_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */