/*
 * linux/include/linux/slab.h
 * Written by Mark Hemment, 1996.
 * (markhe@nextd.demon.co.uk)
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#if	defined(__KERNEL__)

typedef struct kmem_cache kmem_cache_t;

#include	<linux/gfp.h>
#include	<linux/init.h>
#include	<linux/types.h>
#include	<asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include	<asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */

/* flags for kmem_cache_alloc() */
#define	SLAB_NOFS		GFP_NOFS
#define	SLAB_NOIO		GFP_NOIO
#define	SLAB_ATOMIC		GFP_ATOMIC
#define	SLAB_USER		GFP_USER
#define	SLAB_KERNEL		GFP_KERNEL
#define	SLAB_DMA		GFP_DMA

#define	SLAB_LEVEL_MASK		GFP_LEVEL_MASK

#define	SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */
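
/*
 * Example (illustrative): the SLAB_* allocation flags above are plain
 * aliases for the corresponding GFP_* flags, so process context and
 * interrupt context differ only in the flag passed:
 *
 *	obj = kmem_cache_alloc(cachep, SLAB_KERNEL);	(may sleep)
 *	obj = kmem_cache_alloc(cachep, SLAB_ATOMIC);	(must not sleep)
 */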

/* flags to pass to kmem_cache_create().
 * The first 3 are only valid when the allocator has been built with
 * SLAB_DEBUG_SUPPORT.
 */
#define	SLAB_DEBUG_FREE		0x00000100UL	/* Perform (expensive) checks on free */
#define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
#define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
#define	SLAB_POISON		0x00000800UL	/* Poison objects */
#define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on h/w cache lines */
#define	SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
#define	SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
#define	SLAB_STORE_USER		0x00010000UL	/* store the last owner for bug hunting */
#define	SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* track pages allocated to indicate
						   what is reclaimable later */
#define	SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
#define	SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
#define	SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
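
/*
 * Example (illustrative): the flags above are ORed into the flags
 * argument of kmem_cache_create().  A cache of DMA-able, cache-line
 * aligned objects whose creation failure should be fatal might use:
 *
 *	SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC
 */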

/* flags passed to a constructor func */
#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then destructor */
#define	SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
#define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
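
/*
 * Example (illustrative; "struct my_obj" and my_obj_ctor() are made-up
 * names): a constructor receives these flags in its last argument and
 * should only initialise the object on a real construction call:
 *
 *	static void my_obj_ctor(void *obj, kmem_cache_t *cachep,
 *				unsigned long flags)
 *	{
 *		if (flags & SLAB_CTOR_CONSTRUCTOR)
 *			memset(obj, 0, sizeof(struct my_obj));
 *	}
 */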

#ifndef CONFIG_SLOB

/* prototypes */
extern void __init kmem_cache_init(void);

extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
				       void (*)(void *, kmem_cache_t *, unsigned long),
				       void (*)(void *, kmem_cache_t *, unsigned long));
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
extern const char *kmem_cache_name(kmem_cache_t *);
extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
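
/*
 * Example (illustrative; the "foo" names are made up) of the cache
 * lifecycle built from the prototypes above:
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	if (kmem_cache_destroy(foo_cachep))
 *		printk("foo cache still has live objects\n");
 */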

/* Size description struct for general caches. */
struct cache_sizes {
	size_t		 cs_size;
	kmem_cache_t	*cs_cachep;
	kmem_cache_t	*cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];

extern void *__kmalloc(size_t, gfp_t);
#ifndef CONFIG_DEBUG_SLAB
#define ____kmalloc(size, flags) __kmalloc(size, flags)
#else
extern void *__kmalloc_track_caller(size_t, gfp_t, void *);
#define ____kmalloc(size, flags) \
	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
#endif

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
		return kmem_cache_alloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}
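
/*
 * Note on kmalloc() above: for a compile-time-constant size the CACHE()
 * expansion of kmalloc_sizes.h folds down to a direct kmem_cache_alloc()
 * from the matching general cache, so e.g. ("struct foo" is a made-up
 * example)
 *
 *	p = kmalloc(sizeof(struct foo), GFP_KERNEL);
 *
 * needs no runtime size lookup, while a variable size falls through to
 * __kmalloc().
 */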

extern void *__kzalloc(size_t, gfp_t);

static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
		return kmem_cache_zalloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kzalloc(size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	return kzalloc(n * size, flags);
}
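
/*
 * Example (illustrative; "tbl" and "nr_entries" are made-up names): the
 * n * size overflow check above is why array allocations should go
 * through kcalloc() rather than an open-coded kzalloc(n * size, ...):
 *
 *	tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 */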

extern void kfree(const void *);
extern unsigned int ksize(const void *);
extern int slab_is_available(void);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
extern void *kmalloc_node(size_t size, gfp_t flags, int node);
#else
static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}
#endif
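
/*
 * Example (illustrative): on CONFIG_NUMA kernels kmalloc_node() places
 * the allocation on the given node; otherwise the stubs above make it
 * equivalent to plain kmalloc():
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, numa_node_id());
 */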

extern int FASTCALL(kmem_cache_reap(int));
extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));

#else /* CONFIG_SLOB */

/* SLOB allocator routines */

void kmem_cache_init(void);
struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
				     unsigned long,
				     void (*)(void *, struct kmem_cache *, unsigned long),
				     void (*)(void *, struct kmem_cache *, unsigned long));
int kmem_cache_destroy(struct kmem_cache *c);
void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
void kmem_cache_free(struct kmem_cache *c, void *b);
const char *kmem_cache_name(struct kmem_cache *);
void *kmalloc(size_t size, gfp_t flags);
void *__kzalloc(size_t size, gfp_t flags);
void kfree(const void *m);
unsigned int ksize(const void *m);
unsigned int kmem_cache_size(struct kmem_cache *c);
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	/* guard against n * size overflow, as in the !CONFIG_SLOB kcalloc() */
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	return __kzalloc(n * size, flags);
}

#define kmem_cache_shrink(d) (0)
#define kmem_cache_reap(a)
#define kmem_ptr_validate(a, b) (0)
#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
#define kmalloc_node(s, f, n) kmalloc(s, f)
#define kzalloc(s, f) __kzalloc(s, f)
#define ____kmalloc kmalloc

#endif /* CONFIG_SLOB */

/* System wide caches */
extern kmem_cache_t *vm_area_cachep;
extern kmem_cache_t *names_cachep;
extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *fs_cachep;
extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep;

extern atomic_t slab_reclaim_pages;

#endif	/* __KERNEL__ */

#endif	/* _LINUX_SLAB_H */