#include <sys/kmem.h>

/*
 * Memory allocation interfaces
 */
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
atomic64_t kmem_alloc_used;
unsigned long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used;
unsigned long vmem_alloc_max = 0;
int kmem_warning_flag = 1;

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
EXPORT_SYMBOL(kmem_warning_flag);

int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
#else
int kmem_set_warning(int flag) { return 0; }
#endif
EXPORT_SYMBOL(kmem_set_warning);

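/*
 * Illustrative sketch only (the real accounting is done by the allocation
 * wrappers in sys/kmem.h, so the exact code below is an assumption): a
 * kmem_alloc() style wrapper is expected to drive these counters roughly as
 *
 *   ptr = kmalloc(size, flags);
 *   if (ptr) {
 *           atomic64_add(size, &kmem_alloc_used);
 *           if (atomic64_read(&kmem_alloc_used) > kmem_alloc_max)
 *                   kmem_alloc_max = atomic64_read(&kmem_alloc_used);
 *   }
 *
 * with the matching kmem_free() wrapper doing the atomic64_sub().  kmem_fini()
 * below reports any bytes still outstanding at module unload.
 */
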
/*
 * Slab allocation interfaces
 *
 * While the linux slab implementation was inspired by solaris, they have
 * made some changes to the API which complicate this shim layer.  For one
 * thing the same symbol names are used with different arguments for the
 * prototypes.  To deal with this we must use the preprocessor to re-order
 * arguments.  Happily for us standard C says, "Macros appearing in their
 * own expansion are not reexpanded", so this does not result in infinite
 * recursion.  Additionally the function pointers registered by solaris
 * differ from those used by linux, so a lookup and mapping from a linux
 * style callback to a solaris style callback is needed.  There is some
 * overhead in this operation which isn't horrible, but it needs to be
 * kept in mind.
 */
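
/*
 * Illustrative sketch only (the real macro lives in sys/kmem.h, so the exact
 * definition below is an assumption): the header is expected to remap a
 * Solaris-style call onto the shim entry point, roughly
 *
 *   #define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, flags) \
 *           __kmem_cache_create((name), (size), (align), (ctor), (dtor),             \
 *                               (rclm), (priv), (vmp), (flags))
 *
 * __kmem_cache_create() below then calls the native linux
 * kmem_cache_create(name, size, align, flags, ctor, dtor), i.e. the same
 * symbol name but with the arguments re-ordered as described above.
 */
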
typedef struct kmem_cache_cb {
        struct list_head        kcc_list;
        kmem_cache_t *          kcc_cache;
        kmem_constructor_t      kcc_constructor;
        kmem_destructor_t       kcc_destructor;
        kmem_reclaim_t          kcc_reclaim;
        void *                  kcc_private;
        void *                  kcc_vmp;
} kmem_cache_cb_t;


static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
static LIST_HEAD(kmem_cache_cb_list);
static struct shrinker *kmem_cache_shrinker;

/* Function must be called while holding the kmem_cache_cb_lock.
 * Because kmem_cache_t is an opaque datatype we're forced to
 * match pointers to identify specific cache entries.
 */
static kmem_cache_cb_t *
kmem_cache_find_cache_cb(kmem_cache_t *cache)
{
        kmem_cache_cb_t *kcc;

        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
                if (cache == kcc->kcc_cache)
                        return kcc;

        return NULL;
}

static kmem_cache_cb_t *
kmem_cache_add_cache_cb(kmem_cache_t *cache,
                        kmem_constructor_t constructor,
                        kmem_destructor_t destructor,
                        kmem_reclaim_t reclaim,
                        void *priv, void *vmp)
{
        kmem_cache_cb_t *kcc;

        kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
        if (kcc) {
                kcc->kcc_cache = cache;
                kcc->kcc_constructor = constructor;
                kcc->kcc_destructor = destructor;
                kcc->kcc_reclaim = reclaim;
                kcc->kcc_private = priv;
                kcc->kcc_vmp = vmp;
                spin_lock(&kmem_cache_cb_lock);
                list_add(&kcc->kcc_list, &kmem_cache_cb_list);
                spin_unlock(&kmem_cache_cb_lock);
        }

        return kcc;
}

static void
kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
{
        spin_lock(&kmem_cache_cb_lock);
        list_del(&kcc->kcc_list);
        spin_unlock(&kmem_cache_cb_lock);

        /* list_del() above has already dereferenced kcc, so no NULL check */
        kfree(kcc);
}

static void
kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
        kmem_cache_cb_t *kcc;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(!kcc);

        kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
        spin_unlock(&kmem_cache_cb_lock);
        /* The linux constructor returns void, so the solaris constructor's
         * return code is silently dropped. */
}

static void
kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
        kmem_cache_cb_t *kcc;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(!kcc);

        /* Solaris destructor takes no flags, silently eat them */
        kcc->kcc_destructor(ptr, kcc->kcc_private);
        spin_unlock(&kmem_cache_cb_lock);
}

/* XXX - Arguments are ignored */
static int
kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
        kmem_cache_cb_t *kcc;
        int total = 0;

        /* Under linux a shrinker is not tightly coupled with a slab
         * cache.  In fact linux systematically tries calling all registered
         * shrinker callbacks until its target reclamation level is reached.
         * Because of this we only register one shrinker function in the
         * shim layer for all slab caches, and we always attempt to shrink
         * all caches when this generic shrinker is called.
         */
        spin_lock(&kmem_cache_cb_lock);

        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
                /* Under linux the desired number and gfp type of objects
                 * is passed to the reclaiming function as a suggested
                 * reclaim target.  I do not pass these args on because
                 * reclaim policy is entirely up to the owner under solaris.
                 * We only pass on the pre-registered private data.
                 */
                if (kcc->kcc_reclaim)
                        kcc->kcc_reclaim(kcc->kcc_private);

                total += 1;
        }

        /* Under linux we should return the remaining number of entries in
         * the cache.  Unfortunately, I don't see an easy way to safely
         * emulate this behavior so I'm returning one entry per cache which
         * was registered with the generic shrinker.  This should fake out
         * the linux VM when it attempts to shrink caches.
         */
        spin_unlock(&kmem_cache_cb_lock);
        return total;
}

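/*
 * For context, under the old set_shrinker() API assumed by this file the VM
 * first calls a shrinker with nr_to_scan == 0 to query the object count and
 * then calls it again with a real count to do the reclaim, roughly:
 *
 *   count = kmem_cache_generic_shrinker(0, GFP_KERNEL);       (query pass)
 *   (void) kmem_cache_generic_shrinker(count, GFP_KERNEL);    (reclaim pass)
 *
 * Note that because the arguments are ignored above, even the query pass
 * ends up invoking every registered reclaim callback.
 */
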
/* Ensure the __kmem_cache_create/__kmem_cache_destroy macros are
 * removed here to prevent a recursive substitution; we want to call
 * the native linux versions.
 */
#undef kmem_cache_create
#undef kmem_cache_destroy

kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
                    kmem_constructor_t constructor,
                    kmem_destructor_t destructor,
                    kmem_reclaim_t reclaim,
                    void *priv, void *vmp, int flags)
{
        kmem_cache_t *cache;
        kmem_cache_cb_t *kcc;
        int shrinker_flag = 0;
        char *cache_name;

        /* FIXME: Option currently unsupported by shim layer */
        BUG_ON(vmp);

        cache_name = kzalloc(strlen(name) + 1, GFP_KERNEL);
        if (cache_name == NULL)
                return NULL;

        strcpy(cache_name, name);
        cache = kmem_cache_create(cache_name, size, align, flags,
                                  kmem_cache_generic_constructor,
                                  kmem_cache_generic_destructor);
        if (cache == NULL) {
                kfree(cache_name);
                return NULL;
        }

        /* Register shared shrinker function on initial cache create */
        spin_lock(&kmem_cache_cb_lock);
        if (list_empty(&kmem_cache_cb_list)) {
                kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
                                                   kmem_cache_generic_shrinker);
                if (kmem_cache_shrinker == NULL) {
                        spin_unlock(&kmem_cache_cb_lock);
                        kmem_cache_destroy(cache);
                        kfree(cache_name);
                        return NULL;
                }

                shrinker_flag = 1;
        }
        spin_unlock(&kmem_cache_cb_lock);

        kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
                                      reclaim, priv, vmp);
        if (kcc == NULL) {
                if (shrinker_flag) /* New shrinker registered must be removed */
                        remove_shrinker(kmem_cache_shrinker);

                kmem_cache_destroy(cache);
                kfree(cache_name);
                return NULL;
        }

        return cache;
}
EXPORT_SYMBOL(__kmem_cache_create);

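/*
 * Illustrative usage only (obj_t, obj_ctor and obj_dtor are hypothetical,
 * not part of this file): a Solaris-style consumer reaches the entry point
 * above through the kmem_cache_create() macro in sys/kmem.h, e.g.
 *
 *   static int  obj_ctor(void *buf, void *priv, int flags) { ... return 0; }
 *   static void obj_dtor(void *buf, void *priv) { ... }
 *
 *   cache = kmem_cache_create("obj_cache", sizeof(obj_t), 0, obj_ctor,
 *                             obj_dtor, NULL, NULL, NULL, 0);
 *
 * and tears it down with the matching kmem_cache_destroy(cache) macro,
 * which lands in __kmem_cache_destroy() below.
 */
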
/* Return code provided despite Solaris's void return.  There should be no
 * harm here since the Solaris versions will ignore it anyway. */
int
__kmem_cache_destroy(kmem_cache_t *cache)
{
        kmem_cache_cb_t *kcc;
        char *name;
        int rc;

        spin_lock(&kmem_cache_cb_lock);
        kcc = kmem_cache_find_cache_cb(cache);
        spin_unlock(&kmem_cache_cb_lock);
        if (kcc == NULL)
                return -EINVAL;

        name = (char *)kmem_cache_name(cache);
        rc = kmem_cache_destroy(cache);
        kmem_cache_remove_cache_cb(kcc);
        kfree(name);

        /* Unregister generic shrinker on removal of all caches */
        spin_lock(&kmem_cache_cb_lock);
        if (list_empty(&kmem_cache_cb_list))
                remove_shrinker(kmem_cache_shrinker);

        spin_unlock(&kmem_cache_cb_lock);
        return rc;
}
EXPORT_SYMBOL(__kmem_cache_destroy);

void
__kmem_reap(void)
{
        /* Since there's no easy hook into linux to force all the registered
         * shrinkers to run we just run the ones registered for this shim */
        kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
}
EXPORT_SYMBOL(__kmem_reap);

int
kmem_init(void)
{
#ifdef DEBUG_KMEM
        atomic64_set(&kmem_alloc_used, 0);
        atomic64_set(&vmem_alloc_used, 0);
#endif
        return 0;
}

void
kmem_fini(void)
{
#ifdef DEBUG_KMEM
        if (atomic64_read(&kmem_alloc_used) != 0)
                printk(KERN_WARNING "spl: Warning kmem leaked %ld/%lu bytes\n",
                       (long)atomic64_read(&kmem_alloc_used), kmem_alloc_max);

        if (atomic64_read(&vmem_alloc_used) != 0)
                printk(KERN_WARNING "spl: Warning vmem leaked %ld/%lu bytes\n",
                       (long)atomic64_read(&vmem_alloc_used), vmem_alloc_max);
#endif
}