#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <linux/compiler.h>

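/*
 * Illustrative sketch, not part of the original header: with a size known
 * at compile time, the kmalloc() inline defined below folds down to a
 * direct lookup in one of the general caches.  Assuming the usual
 * power-of-two size classes, a call such as
 *
 *	buf = kmalloc(100, GFP_KERNEL);
 *
 * reduces, after constant propagation, to roughly
 *
 *	buf = kmem_cache_alloc_trace(kmalloc_caches[7], GFP_KERNEL, 100);
 *
 * since kmalloc_index(100) rounds the size up to 2^7 = 128 bytes.  Sizes
 * that are not compile-time constants fall through to __kmalloc() instead.
 */
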
/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;	/* reciprocal of size; see the sketch after this struct */
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields; obj_offset below is
	 * the offset from the start of that storage to the user-visible
	 * object (whose size is kept in object_size above).
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_cache_node **node;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
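
/*
 * Sketch, not part of the original header: reciprocal_buffer_size holds a
 * precomputed reciprocal_value(size) so the hot paths can turn an object's
 * byte offset within a slab into an object index without a hardware
 * division.  mm/slab.c computes object indices along these lines
 * (simplified, assuming the reciprocal_div API of this era):
 *
 *	#include <linux/reciprocal_div.h>
 *
 *	static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 *						const struct slab *slab, void *obj)
 *	{
 *		u32 offset = obj - slab->s_mem;
 *
 *		return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 *	}
 */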

extern struct kmem_cache *kmalloc_caches[PAGE_SHIFT + MAX_ORDER];
extern struct kmem_cache *kmalloc_dma_caches[PAGE_SHIFT + MAX_ORDER];

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
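
/*
 * Usage sketch (illustrative only; the helper name and sizes are
 * hypothetical, not part of the original header): the branch below takes
 * the constant-size fast path when it can, and the generic path otherwise.
 */
static __always_inline void *slab_def_example(size_t len, gfp_t flags)
{
	if (len <= 64)
		/* Constant size: folds to a kmalloc_caches[] lookup. */
		return kmalloc(64, flags);
	/* Non-constant size: compiles to a call to __kmalloc(). */
	return kmalloc(len, flags);
}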

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
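
/*
 * Usage sketch (illustrative only; the helper name is hypothetical, not
 * part of the original header): keep an allocation on the NUMA node that
 * will access it, falling back to kmalloc() when the caller expresses no
 * preference.
 */
static __always_inline void *slab_def_node_example(size_t size, gfp_t flags,
						   int node)
{
	if (node < 0)		/* e.g. NUMA_NO_NODE: no preference */
		return kmalloc(size, flags);
	return kmalloc_node(size, flags, node);
}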

#endif /* CONFIG_NUMA */

#endif /* _LINUX_SLAB_DEF_H */