#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
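/*
 * Note (added for clarity): malloc_sizes[] is defined by the SLAB
 * implementation (mm/slab.c) and is expected to contain one entry per
 * CACHE(x) line in kmalloc_sizes.h, in the same order, so the index
 * computed by the inline helpers below selects the smallest general
 * cache whose cs_size fits the requested allocation.
 */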

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}
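/*
 * Illustrative example (not part of the original header): with a
 * compile-time-constant size such as
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 * the CACHE() expansion above folds at compile time into a direct
 * kmem_cache_alloc() on the smallest fitting general cache, while a
 * runtime-variable size falls through to __kmalloc(size, flags).
 * (struct foo is hypothetical.)
 */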

static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kzalloc(size, flags);
}
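/*
 * Illustrative example (not part of the original header): kzalloc()
 * mirrors kmalloc() but routes through kmem_cache_zalloc()/__kzalloc(),
 * so a constant-size call such as
 *
 *	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 * returns memory with every byte already cleared to zero.
 * (struct foo is hypothetical.)
 */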

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
						flags, node);
#endif
		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
						flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
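/*
 * Illustrative example (not part of the original header): callers that
 * want memory local to a particular NUMA node, e.g.
 *
 *	void *buf = kmalloc_node(size, GFP_KERNEL, numa_node_id());
 *
 * get the same constant-size cache selection as kmalloc(), with the
 * allocation directed to the requested node.
 */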

#endif /* CONFIG_NUMA */
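/*
 * Note (added for clarity): these declarations back the /proc/slabinfo
 * interface; slabinfo_op is presumably the seq_file iterator and
 * slabinfo_write handles writes that tune per-cache parameters.
 */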
extern const struct seq_operations slabinfo_op;
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);

#endif /* _LINUX_SLAB_DEF_H */