4 * Memory allocation interfaces
7 /* Shim layer memory accounting */
/*
 * Running byte totals and observed high-water marks for allocations made
 * through the kmem/vmem shim, plus a flag gating warning output.  All
 * are exported so the rest of the shim layer can update/inspect them.
 */
8 atomic64_t kmem_alloc_used
;
9 unsigned long kmem_alloc_max
= 0;
10 atomic64_t vmem_alloc_used
;
11 unsigned long vmem_alloc_max
= 0;
/* Non-zero (the default) enables kmem warnings; see kmem_set_warning(). */
12 int kmem_warning_flag
= 1;
14 EXPORT_SYMBOL(kmem_alloc_used
);
15 EXPORT_SYMBOL(kmem_alloc_max
);
16 EXPORT_SYMBOL(vmem_alloc_used
);
17 EXPORT_SYMBOL(vmem_alloc_max
);
18 EXPORT_SYMBOL(kmem_warning_flag
);
20 int kmem_set_warning(int flag
) { return (kmem_warning_flag
= !!flag
); }
/* Stub variant used when kmem warning support is compiled out: the
 * requested flag value is ignored and 0 is always returned. */
int kmem_set_warning(int flag)
{
	(void) flag;	/* deliberately unused */
	return 0;
}
/* Expose the warning toggle to other kernel modules. */
24 EXPORT_SYMBOL(kmem_set_warning
);
27 * Slab allocation interfaces
29 * While the linux slab implementation was inspired by solaris they
30 * have made some changes to the API which complicates this shim
31 * layer. For one thing the same symbol names are used with different
32 * arguments for the prototypes. To deal with this we must use the
33 * preprocessor to re-order arguments. Happily for us standard C says,
34 * "Macros appearing in their own expansion are not reexpanded" so
35 * this does not result in an infinite recursion. Additionally the
36 * function pointers registered by solaris differ from those used
37 * by linux so a lookup and mapping from linux style callback to a
38 * solaris style callback is needed. There is some overhead in this
39 * operation which isn't horrible but it needs to be kept in mind.
/*
 * Per-cache callback record: maps an opaque linux kmem_cache_t back to
 * the Solaris-style constructor/destructor/reclaim callbacks (and
 * private data) registered for it.  Records live on kmem_cache_cb_list,
 * protected by kmem_cache_cb_lock.
 *
 * NOTE(review): orig. lines 47-51 are missing from this chunk -- the
 * kcc_private member (referenced elsewhere in this file) and the closing
 * "} kmem_cache_cb_t;" presumably live there; confirm against the full
 * file.
 */
41 typedef struct kmem_cache_cb
{
42 struct list_head kcc_list
;
43 kmem_cache_t
* kcc_cache
;
44 kmem_constructor_t kcc_constructor
;
45 kmem_destructor_t kcc_destructor
;
46 kmem_reclaim_t kcc_reclaim
;
/* Guards kmem_cache_cb_list and all record lookups. */
52 static spinlock_t kmem_cache_cb_lock
= SPIN_LOCK_UNLOCKED
;
/* All registered cache callback records. */
53 static LIST_HEAD(kmem_cache_cb_list
);
/* Single shared shrinker registered on behalf of every shim cache. */
54 static struct shrinker
*kmem_cache_shrinker
;
/* Function must be called while holding the kmem_cache_cb_lock.
 * Because kmem_cache_t is an opaque datatype we're forced to
 * match pointers to identify specific cache entries.
 *
 * Returns the kmem_cache_cb_t registered for 'cache'.
 * NOTE(review): the opening brace, the local 'kcc' declaration, and the
 * return statements fall in lines missing from this chunk (orig. 62-64,
 * 67-71); presumably NULL is returned when no record matches -- confirm.
 */
60 static kmem_cache_cb_t
*
61 kmem_cache_find_cache_cb(kmem_cache_t
*cache
)
/* Linear scan of every registered record, matching the opaque pointer. */
65 list_for_each_entry(kcc
, &kmem_cache_cb_list
, kcc_list
)
66 if (cache
== kcc
->kcc_cache
)
/*
 * Allocate and register a callback record binding 'cache' to the given
 * Solaris-style constructor/destructor/reclaim callbacks and private
 * data.  The record is linked onto kmem_cache_cb_list under
 * kmem_cache_cb_lock with interrupts disabled.
 *
 * NOTE(review): orig. line 83 is missing from this chunk and the kmalloc
 * result is dereferenced below -- confirm a NULL check exists there.
 * 'vmp' is accepted but not stored in any line visible here.
 */
72 static kmem_cache_cb_t
*
73 kmem_cache_add_cache_cb(kmem_cache_t
*cache
,
74 kmem_constructor_t constructor
,
75 kmem_destructor_t destructor
,
76 kmem_reclaim_t reclaim
,
77 void *priv
, void *vmp
)
82 kcc
= (kmem_cache_cb_t
*)kmalloc(sizeof(*kcc
), GFP_KERNEL
);
84 kcc
->kcc_cache
= cache
;
85 kcc
->kcc_constructor
= constructor
;
86 kcc
->kcc_destructor
= destructor
;
87 kcc
->kcc_reclaim
= reclaim
;
88 kcc
->kcc_private
= priv
;
/* Publish the record; irqsave matches the other users of this lock. */
90 spin_lock_irqsave(&kmem_cache_cb_lock
, flags
);
91 list_add(&kcc
->kcc_list
, &kmem_cache_cb_list
);
92 spin_unlock_irqrestore(&kmem_cache_cb_lock
, flags
);
/*
 * Unlink a callback record from kmem_cache_cb_list under the list lock.
 * NOTE(review): the record's storage is presumably kfree()'d in lines
 * missing from this chunk (orig. 100-102, 106+) -- confirm, otherwise
 * this leaks one kmem_cache_cb_t per destroyed cache.
 */
99 kmem_cache_remove_cache_cb(kmem_cache_cb_t
*kcc
)
103 spin_lock_irqsave(&kmem_cache_cb_lock
, flags
);
104 list_del(&kcc
->kcc_list
);
105 spin_unlock_irqrestore(&kmem_cache_cb_lock
, flags
);
/*
 * Linux-style slab constructor trampoline: look up the Solaris-style
 * constructor registered for 'cache' and invoke it with the registered
 * private data and the (int-cast) slab flags.  The lookup runs under
 * kmem_cache_cb_lock so the callback list stays in sync with the linux
 * slab caches.
 *
 * NOTE(review): 'private' is assigned below but its declaration falls in
 * lines missing from this chunk (orig. 117-118); likewise any NULL-kcc
 * guard (orig. 123, 126, 128-131).  Confirm against the full file.
 */
112 kmem_cache_generic_constructor(void *ptr
, kmem_cache_t
*cache
, unsigned long flags
)
114 kmem_cache_cb_t
*kcc
;
115 kmem_constructor_t constructor
;
116 unsigned long irqflags
;
119 spin_lock_irqsave(&kmem_cache_cb_lock
, irqflags
);
121 /* Callback list must be in sync with linux slab caches */
122 kcc
= kmem_cache_find_cache_cb(cache
);
124 constructor
= kcc
->kcc_constructor
;
125 private = kcc
->kcc_private
;
/* Drop the lock before calling out: the callback may sleep/allocate. */
127 spin_unlock_irqrestore(&kmem_cache_cb_lock
, irqflags
);
130 constructor(ptr
, private, (int)flags
);
132 /* Linux constructor has no return code, silently eat it */
/*
 * Linux-style slab destructor trampoline: mirror image of the generic
 * constructor above -- look up the registered Solaris-style destructor
 * for 'cache' under kmem_cache_cb_lock, then invoke it with the
 * registered private data.
 *
 * NOTE(review): as with the constructor, the declaration of 'private'
 * and any NULL-kcc guard fall in lines missing from this chunk
 * (orig. 141-142, 147, 150, 152, 156-157) -- confirm.
 */
136 kmem_cache_generic_destructor(void *ptr
, kmem_cache_t
*cache
, unsigned long flags
)
138 kmem_cache_cb_t
*kcc
;
139 kmem_destructor_t destructor
;
140 unsigned long irqflags
;
143 spin_lock_irqsave(&kmem_cache_cb_lock
, irqflags
);
145 /* Callback list must be in sync with linux slab caches */
146 kcc
= kmem_cache_find_cache_cb(cache
);
148 destructor
= kcc
->kcc_destructor
;
149 private = kcc
->kcc_private
;
/* Drop the lock before calling out to the registered destructor. */
151 spin_unlock_irqrestore(&kmem_cache_cb_lock
, irqflags
);
153 /* Solaris destructor takes no flags, silently eat them */
155 destructor(ptr
, private);
158 /* XXX - Arguments are ignored */
/*
 * Shared shrinker callback run on behalf of every shim slab cache.
 * NOTE(review): the opening brace, the 'flags' declaration, and the
 * return statement fall in lines missing from this chunk (orig.
 * 161-165, 184-187, 195-196); per the comments below it apparently
 * returns one entry per registered cache -- confirm.
 */
160 kmem_cache_generic_shrinker(int nr_to_scan
, unsigned int gfp_mask
)
162 kmem_cache_cb_t
*kcc
;
/* Under linux a shrinker is not tightly coupled with a slab
 * cache. In fact linux always systematically tries calling all
 * registered shrinker callbacks until its target reclamation level
 * is reached. Because of this we only register one shrinker
 * function in the shim layer for all slab caches. And we always
 * attempt to shrink all caches when this generic shrinker is called.
 */
173 spin_lock_irqsave(&kmem_cache_cb_lock
, flags
);
175 list_for_each_entry(kcc
, &kmem_cache_cb_list
, kcc_list
) {
/* Under linux the desired number and gfp type of objects
 * is passed to the reclaiming function as a suggested reclaim
 * target. I do not pass these args on because reclaim
 * policy is entirely up to the owner under solaris. We only
 * pass on the pre-registered private data.
 */
182 if (kcc
->kcc_reclaim
)
183 kcc
->kcc_reclaim(kcc
->kcc_private
);
/* Under linux we should return the remaining number of entries in
 * the cache. Unfortunately, I don't see an easy way to safely
 * emulate this behavior so I'm returning one entry per cache which
 * was registered with the generic shrinker. This should fake out
 * the linux VM when it attempts to shrink caches.
 */
194 spin_unlock_irqrestore(&kmem_cache_cb_lock
, flags
);
/* Ensure the __kmem_cache_create/__kmem_cache_destroy macros are
 * removed here to prevent a recursive substitution, we want to call
 * the native linux version.
 */
202 #undef kmem_cache_create
203 #undef kmem_cache_destroy
/*
 * Shim front-end for the native kmem_cache_create(): duplicate 'name',
 * create the linux cache wired to the generic constructor/destructor
 * trampolines, register the single shared shrinker when this is the
 * first cache, then record the Solaris-style callbacks via
 * kmem_cache_add_cache_cb().
 *
 * NOTE(review): this path takes kmem_cache_cb_lock with plain
 * spin_lock() while every other path uses spin_lock_irqsave() on the
 * same lock -- if the lock is ever taken from interrupt context that is
 * a deadlock; confirm and unify.
 * NOTE(review): several error-handling/return lines are missing from
 * this chunk (gaps around orig. 211-252) -- confirm 'cache_name' and
 * the cache are released on every failure path.
 */
206 __kmem_cache_create(char *name
, size_t size
, size_t align
,
207 kmem_constructor_t constructor
,
208 kmem_destructor_t destructor
,
209 kmem_reclaim_t reclaim
,
210 void *priv
, void *vmp
, int flags
)
213 kmem_cache_cb_t
*kcc
;
214 int shrinker_flag
= 0;
217 /* FIXME: - Option currently unsupported by shim layer */
/* The linux cache keeps a reference to the name, so a private copy is
 * made that lives as long as the cache itself. */
220 cache_name
= kzalloc(strlen(name
) + 1, GFP_KERNEL
);
221 if (cache_name
== NULL
)
224 strcpy(cache_name
, name
);
225 cache
= kmem_cache_create(cache_name
, size
, align
, flags
,
226 kmem_cache_generic_constructor
,
227 kmem_cache_generic_destructor
);
231 /* Register shared shrinker function on initial cache create */
232 spin_lock(&kmem_cache_cb_lock
);
233 if (list_empty(&kmem_cache_cb_list
)) {
234 kmem_cache_shrinker
= set_shrinker(KMC_DEFAULT_SEEKS
,
235 kmem_cache_generic_shrinker
);
236 if (kmem_cache_shrinker
== NULL
) {
237 kmem_cache_destroy(cache
);
238 spin_unlock(&kmem_cache_cb_lock
);
243 spin_unlock(&kmem_cache_cb_lock
);
245 kcc
= kmem_cache_add_cache_cb(cache
, constructor
, destructor
,
/* Failure path: roll back the shrinker and the native cache. */
248 if (shrinker_flag
) /* New shrinker registered must be removed */
249 remove_shrinker(kmem_cache_shrinker
);
251 kmem_cache_destroy(cache
);
257 EXPORT_SYMBOL(__kmem_cache_create
);
259 /* Return code provided despite Solaris's void return. There should be no
260 * harm here since the Solaris versions will ignore it anyway. */
/*
 * Shim front-end for kmem_cache_destroy(): locate the callback record
 * for 'cache', destroy the native cache, unlink the record, and drop
 * the shared shrinker once the last cache is gone.
 *
 * NOTE(review): kmem_cache_cb_lock is released between the lookup here
 * and kmem_cache_remove_cache_cb() re-taking it, so concurrent destroys
 * could race on the same record -- confirm callers serialize cache
 * destruction.
 * NOTE(review): declarations of 'name', 'rc' and 'flags', any NULL-kcc
 * guard, and the return fall in lines missing from this chunk.
 */
262 __kmem_cache_destroy(kmem_cache_t
*cache
)
264 kmem_cache_cb_t
*kcc
;
269 spin_lock_irqsave(&kmem_cache_cb_lock
, flags
);
270 kcc
= kmem_cache_find_cache_cb(cache
);
271 spin_unlock_irqrestore(&kmem_cache_cb_lock
, flags
);
275 name
= (char *)kmem_cache_name(cache
);
276 rc
= kmem_cache_destroy(cache
);
277 kmem_cache_remove_cache_cb(kcc
);
280 /* Unregister generic shrinker on removal of all caches */
281 spin_lock_irqsave(&kmem_cache_cb_lock
, flags
);
282 if (list_empty(&kmem_cache_cb_list
))
283 remove_shrinker(kmem_cache_shrinker
);
285 spin_unlock_irqrestore(&kmem_cache_cb_lock
, flags
);
288 EXPORT_SYMBOL(__kmem_cache_destroy
);
/* Force reclamation across every cache registered with the shim.
 * NOTE(review): the __kmem_reap() signature and braces fall in lines
 * missing from this chunk (orig. 289-291, 295). */
292 /* Since there's no easy hook in to linux to force all the registered
293 * shrinkers to run we just run the ones registered for this shim */
294 kmem_cache_generic_shrinker(KMC_REAP_CHUNK
, GFP_KERNEL
);
296 EXPORT_SYMBOL(__kmem_reap
);
/* Reset shim allocation accounting at module init.  NOTE(review): the
 * enclosing init/fini function headers fall in lines missing from this
 * chunk (orig. 297-301, 304-311). */
302 atomic64_set(&kmem_alloc_used
, 0);
303 atomic64_set(&vmem_alloc_used
, 0);
/*
 * Leak report at teardown: anything still accounted as allocated when
 * the module unloads was leaked through the shim.
 *
 * NOTE(review): BUG -- the guards correctly use atomic64_read(), but the
 * printk arguments below use atomic_read() on the very same atomic64_t
 * objects; on 32-bit kernels that reads the wrong width.  They should be
 * atomic64_read(), and the "%ld" specifiers likely need a 64-bit format
 * (or an explicit cast) to match.  Not fixed here because the enclosing
 * function is only partially visible.
 */
312 if (atomic64_read(&kmem_alloc_used
) != 0)
313 printk("spl: Warning kmem leaked %ld/%ld bytes\n",
314 atomic_read(&kmem_alloc_used
), kmem_alloc_max
);
316 if (atomic64_read(&vmem_alloc_used
) != 0)
317 printk("spl: Warning vmem leaked %ld/%ld bytes\n",
318 atomic_read(&vmem_alloc_used
), vmem_alloc_max
);