#define __GFP_REPEAT ((__force gfp_t)0x400u) /* Retry the allocation. Might fail */
#define __GFP_NOFAIL ((__force gfp_t)0x800u) /* Retry for ever. Cannot fail */
#define __GFP_NORETRY ((__force gfp_t)0x1000u) /* Do not retry. Might fail */
-#define __GFP_NO_GROW ((__force gfp_t)0x2000u) /* Slab internal usage */
#define __GFP_COMP ((__force gfp_t)0x4000u) /* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)0x8000u) /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
/* if you forget to add the bitmask here, the kernel will crash, period */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
- __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
+ __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
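/*
 * Illustrative sketch, not part of the patch: a userspace mock showing
 * how masking with GFP_LEVEL_MASK strips any bit that is not listed in
 * it. Only flag values visible in this hunk are reproduced; the real
 * mask also carries __GFP_WAIT, __GFP_HIGH, __GFP_IO, __GFP_FS,
 * __GFP_COLD, __GFP_NOWARN, __GFP_HARDWALL and __GFP_THISNODE.
 */
#include <assert.h>

typedef unsigned int gfp_t;

#define __GFP_NOFAIL	((gfp_t)0x800u)
#define __GFP_NORETRY	((gfp_t)0x1000u)
#define __GFP_COMP	((gfp_t)0x4000u)
#define __GFP_ZERO	((gfp_t)0x8000u)	/* note: not part of GFP_LEVEL_MASK */
#define GFP_LEVEL_MASK	(__GFP_NOFAIL | __GFP_NORETRY | __GFP_COMP)

int main(void)
{
	gfp_t flags = __GFP_NORETRY | __GFP_ZERO;

	/* the idiom used in the slab hunk below: keep only the "level" bits */
	gfp_t local_flags = flags & GFP_LEVEL_MASK;

	/* __GFP_ZERO is outside the mask, so it is filtered out */
	assert(local_flags == __GFP_NORETRY);
	return 0;
}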
/*
 * Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
- BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
- if (flags & __GFP_NO_GROW)
- return 0;
+ BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & GFP_LEVEL_MASK);
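/*
 * Illustrative sketch, not part of the patch: with __GFP_NO_GROW
 * removed from GFP_LEVEL_MASK, a caller that still passes the old
 * 0x2000u bit now falls outside GFP_DMA | GFP_LEVEL_MASK, so the
 * tightened BUG_ON catches it instead of silently honouring it.
 * GFP_DMA's value is assumed here purely for illustration.
 */
#include <assert.h>

typedef unsigned int gfp_t;

#define GFP_DMA		((gfp_t)0x01u)		/* assumed value, illustration only */
#define __GFP_NO_GROW	((gfp_t)0x2000u)	/* the bit this patch removes */
#define GFP_LEVEL_MASK	((gfp_t)0x15c00u)	/* REPEAT|NOFAIL|NORETRY|COMP|NOMEMALLOC */

int main(void)
{
	gfp_t flags = __GFP_NO_GROW;	/* a stale caller */

	/* mirrors BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)): the
	 * expression is non-zero, so the kernel check would fire */
	assert(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
	return 0;
}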
flags | GFP_THISNODE, nid);
}
- if (!obj && !(flags & __GFP_NO_GROW)) {
+ if (!obj) {
/*
* This allocation will be performed within the constraints
* of the current cpuset / memory policy requirements.