/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
/*
 * Sanity-check the arguments to kmem_cache_create() and warn about
 * duplicate cache names. Only compiled in when CONFIG_DEBUG_VM is set.
 */
static int kmem_cache_sanity_check(const char *name, size_t size)
{
        struct kmem_cache *s = NULL;

        if (!name || in_interrupt() || size < sizeof(void *) ||
                size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }

        list_for_each_entry(s, &slab_caches, list) {
                char tmp;
                int res;

                /*
                 * This happens when the module gets unloaded and doesn't
                 * destroy its slab cache and no-one else reuses the vmalloc
                 * area of the module.  Print a warning.
                 */
                res = probe_kernel_address(s->name, tmp);
                if (res) {
                        pr_err("Slab cache with size %d has lost its name\n",
                               s->object_size);
                        continue;
                }

                if (!strcmp(s->name, name)) {
                        pr_err("%s (%s): Cache name already exists.\n",
                               __func__, name);
                        dump_stack();
                        s = NULL;
                        return -EINVAL;
                }
        }

        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
        return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
        return 0;
}
#endif

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
                unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s = NULL;
        int err = 0;

        get_online_cpus();
        mutex_lock(&slab_mutex);

        if (kmem_cache_sanity_check(name, size))
                goto out_locked;

        s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
                goto out_locked;

        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (s) {
                s->object_size = s->size = size;
                s->align = align;
                s->ctor = ctor;
                s->name = kstrdup(name, GFP_KERNEL);
                if (!s->name) {
                        kmem_cache_free(kmem_cache, s);
                        err = -ENOMEM;
                        goto out_locked;
                }

                err = __kmem_cache_create(s, flags);
                if (!err) {
                        s->refcount = 1;
                        list_add(&s->list, &slab_caches);
                } else {
                        kfree(s->name);
                        kmem_cache_free(kmem_cache, s);
                }
        } else
                err = -ENOMEM;

out_locked:
        mutex_unlock(&slab_mutex);
        put_online_cpus();

        if (err) {
                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                                name, err);
                else {
                        printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
                                name, err);
                        dump_stack();
                }
                return NULL;
        }

        return s;
}
EXPORT_SYMBOL(kmem_cache_create);
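
/*
 * Example (illustrative only, not part of this file): a typical user of
 * kmem_cache_create() pairs it with kmem_cache_alloc()/kmem_cache_free().
 * struct foo, foo_cache and the "foo" name below are hypothetical.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *					      0, SLAB_HWCACHE_ALIGN, NULL);
 *		if (!foo_cache)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 */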

void kmem_cache_destroy(struct kmem_cache *s)
{
        get_online_cpus();
        mutex_lock(&slab_mutex);
        s->refcount--;
        if (!s->refcount) {
                list_del(&s->list);

                if (!__kmem_cache_shutdown(s)) {
                        mutex_unlock(&slab_mutex);
                        if (s->flags & SLAB_DESTROY_BY_RCU)
                                rcu_barrier();

                        kfree(s->name);
                        kmem_cache_free(kmem_cache, s);
                } else {
                        list_add(&s->list, &slab_caches);
                        mutex_unlock(&slab_mutex);
                        printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
                                s->name);
                        dump_stack();
                }
        } else {
                mutex_unlock(&slab_mutex);
        }
        put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
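
/*
 * Example (illustrative only): the teardown matching the creation sketch
 * above. Every object must have been returned with kmem_cache_free()
 * first, or the "Slab cache still has objects" branch above fires instead.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cache);
 *	}
 */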

/*
 * slab_is_available - report whether the slab allocator is usable yet.
 *
 * Returns nonzero once bootstrap has advanced slab_state to at least UP,
 * i.e. slab allocations may be attempted.
 */
int slab_is_available(void)
{
        return slab_state >= UP;
}
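
/*
 * Example (illustrative only, not part of this file): early-boot code
 * commonly checks slab_is_available() to decide between the slab
 * allocator and a boot-time allocator; the fallback shown is one such
 * pattern, with size a hypothetical variable.
 *
 *	void *buf;
 *
 *	if (slab_is_available())
 *		buf = kmalloc(size, GFP_KERNEL);
 *	else
 *		buf = alloc_bootmem(size);
 */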