#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
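/*
 * Illustrative sketch (not part of the original header): kmalloc_sizes.h
 * expands one CACHE(x) entry per general cache size, so with the CACHE()
 * definition used in kmalloc() below, a constant-size call such as
 * kmalloc(24, GFP_KERNEL) unrolls roughly into:
 *
 *        if (24 <= 32)  goto found;  else i++;    // CACHE(32)
 *        if (24 <= 64)  goto found;  else i++;    // CACHE(64)
 *        ...
 *
 * The compiler folds this chain into a single constant index i, so the
 * call reduces to a direct kmem_cache_alloc() on the matching general
 * cache.  The exact size list depends on PAGE_SIZE and L1_CACHE_BYTES.
 */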

#include <linux/init.h>
#include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/* Size description struct for general caches. */
struct cache_sizes {
        size_t                  cs_size;
        struct kmem_cache       *cs_cachep;
#ifdef CONFIG_ZONE_DMA
        struct kmem_cache       *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
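/*
 * Sketch of how this table is expected to be populated (the authoritative
 * definition lives in mm/slab.c, not in this header): the same
 * kmalloc_sizes.h include trick fills one entry per cache size, roughly:
 *
 *        struct cache_sizes malloc_sizes[] = {
 *        #define CACHE(x) { .cs_size = (x) },
 *        #include <linux/kmalloc_sizes.h>
 *                CACHE(ULONG_MAX)
 *        #undef CACHE
 *        };
 *
 * Each entry pairs a size with its kmem_cache (and, with CONFIG_ZONE_DMA,
 * a separate DMA-capable cache), indexed in the same order the CACHE()
 * cascade in kmalloc() counts through.
 */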

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

/*
 * For sizes known at compile time, walk the general cache sizes and
 * allocate directly from the matching cache; otherwise fall back to
 * the out-of-line __kmalloc().
 */
static inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include "kmalloc_sizes.h"
#undef CACHE
                {
                        extern void __you_cannot_kmalloc_that_much(void);
                        __you_cannot_kmalloc_that_much();
                }
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
                                                flags);
#endif
                return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
        }
        return __kmalloc(size, flags);
}
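/*
 * Usage sketch (illustrative only; 'struct foo', 'buf' and 'len' are
 * made-up names, not part of this header):
 *
 *        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);  // constant size:
 *                                                          // inlined cache pick
 *        char *buf = kmalloc(len, GFP_KERNEL);             // runtime size:
 *                                                          // goes via __kmalloc()
 *
 * Both callers must check for a NULL return before using the memory.
 */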

/*
 * Zeroing variant of kmalloc(): same compile-time cache selection, but
 * the returned memory is cleared (via kmem_cache_zalloc()/__kzalloc()).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include "kmalloc_sizes.h"
#undef CACHE
                {
                        extern void __you_cannot_kzalloc_that_much(void);
                        __you_cannot_kzalloc_that_much();
                }
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
                                                 flags);
#endif
                return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
        }
        return __kzalloc(size, flags);
}
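/*
 * Usage sketch (illustrative; 'struct bar' is a made-up name): because
 * kzalloc() returns zeroed memory, no memset() is needed after it:
 *
 *        struct bar *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *        if (!b)
 *                return -ENOMEM;
 */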

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

/*
 * NUMA-aware variant of kmalloc(): same compile-time cache selection,
 * but the allocation is directed at the given node.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include "kmalloc_sizes.h"
#undef CACHE
                {
                        extern void __you_cannot_kmalloc_that_much(void);
                        __you_cannot_kmalloc_that_much();
                }
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
                                                     flags, node);
#endif
                return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
                                             flags, node);
        }
        return __kmalloc_node(size, flags, node);
}

#endif  /* CONFIG_NUMA */
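/*
 * Usage sketch (illustrative; dev_to_node(dev) merely stands in for any
 * way of obtaining a NUMA node id for the caller):
 *
 *        void *p = kmalloc_node(sizeof(struct foo), GFP_KERNEL,
 *                               dev_to_node(dev));
 *        if (!p)
 *                return -ENOMEM;
 */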

extern const struct seq_operations slabinfo_op;
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);

#endif  /* _LINUX_SLAB_DEF_H */