/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names array is defined in
 * mm/memcontrol.c; the two lists must be kept in sync.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
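
/*
 * Typical use of the try/commit/cancel charge protocol, as a rough
 * illustration only: the fault-path context and the map_page() step are
 * assumptions for the example, not part of this header.
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		return VM_FAULT_OOM;
 *	if (map_page(page)) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return VM_FAULT_OOM;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 */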

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
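
/*
 * When many pages are uncharged together (e.g. while truncating a file),
 * bracketing the loop batches the uncharges; a sketch, with the page list
 * assumed for illustration:
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */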

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
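
/*
 * Migration hands the charge from the old page to the new one; a sketch
 * of the protocol only (error handling is elided, and migrate_contents()
 * is a stand-in, not a real function):
 *
 *	struct mem_cgroup *memcg;
 *	int rc;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg);
 *	rc = migrate_contents(page, newpage);
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */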

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
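
/*
 * Hierarchy walks look like this in the reclaim code; a sketch, where
 * @root and the should_stop() predicate are assumptions for illustration:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (memcg) {
 *		if (should_stop(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 */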

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					  struct page *newpage);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);
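
/*
 * The page fault path brackets the fault so that a memcg OOM detected
 * during the fault is only acted upon once the fault stack is unwound;
 * a sketch of that usage (the handle_mm_fault() call site and the flag
 * check are assumptions for illustration):
 *
 *	mem_cgroup_oom_enable();
 *	ret = handle_mm_fault(mm, vma, address, flags);
 *	mem_cgroup_oom_disable();
 *
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_ERROR))
 *		mem_cgroup_oom_synchronize(true);
 */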

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (memory_cgrp_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
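
/*
 * Stat updates are bracketed so they cannot race with a task moving
 * between memcgs; for example (a sketch, with the rmap-style call site
 * assumed for illustration):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (page_mapped(page))
 *		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */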

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
						 struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
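
/*
 * For example (a sketch; @s names some root cache, and
 * cache_from_memcg_idx() stands in for however the caller maps an index
 * to a child cache):
 *
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_memcg_idx(s, i);
 *		if (c)
 *			shrink(c);
 *	}
 *	mutex_unlock(&slab_mutex);
 */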

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power not to incur any overhead
 * for non-memcg users of the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
				 int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
			     struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);

int __memcg_cleanup_cache_params(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (gfp & __GFP_NOFAIL)
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit @memcg to the corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
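
/*
 * Taken together, an allocation path pairs the three helpers roughly as
 * follows (a sketch of the protocol only; the surrounding allocator
 * logic is elided):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);	(reverts if !page)
 *	...
 *	memcg_kmem_uncharge_pages(page, order);		(when freeing)
 */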

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
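
/*
 * A slab allocation path would substitute the current memcg's clone of
 * the cache before allocating; a sketch (slab_alloc() is a stand-in for
 * the allocator internals, not a function declared here):
 *
 *	cachep = memcg_kmem_get_cache(cachep, flags);
 *	obj = slab_alloc(cachep, flags);
 */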
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */