/*
 * Memory allocation interfaces
 */
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
atomic64_t kmem_alloc_used;
unsigned long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used;
unsigned long vmem_alloc_max = 0;
int kmem_warning_flag = 1;

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
EXPORT_SYMBOL(kmem_warning_flag);

int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
#else
int kmem_set_warning(int flag) { return 0; }
#endif
EXPORT_SYMBOL(kmem_set_warning);
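
/*
 * Illustrative sketch (hypothetical helper, not part of the shim API)
 * of how the allocation wrappers are expected to maintain the counters
 * above: add on every allocation, track the high-water mark, and
 * subtract again on free.
 */
#ifdef DEBUG_KMEM
static inline void
kmem_alloc_account(size_t size)
{
        atomic64_add(size, &kmem_alloc_used);
        if ((unsigned long)atomic64_read(&kmem_alloc_used) > kmem_alloc_max)
                kmem_alloc_max = atomic64_read(&kmem_alloc_used);
}
#endif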

/*
 * Slab allocation interfaces
 *
 * While the linux slab implementation was inspired by solaris they
 * have made some changes to the API which complicate this shim
 * layer.  For one thing the same symbol names are used with different
 * arguments for the prototypes.  To deal with this we must use the
 * preprocessor to re-order arguments.  Happily for us standard C says,
 * "Macros appearing in their own expansion are not reexpanded", so
 * this does not result in infinite recursion.  Additionally, the
 * function pointers registered by solaris differ from those used
 * by linux so a lookup and mapping from a linux style callback to a
 * solaris style callback is needed.  There is some overhead in this
 * operation which isn't horrible, but it needs to be kept in mind.
 */
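
/*
 * A minimal sketch of the argument re-ordering trick described above
 * (illustrative assumption only; the shipped macro lives in the shim
 * headers and may differ).  The kmem_cache_create in the expansion is
 * not re-expanded, so it binds to the native linux 6-argument function
 * while callers use the solaris 9-argument order.
 */
#if 0
#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
        kmem_cache_create(name, size, align, fl, ctor, dtor)
#endif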
typedef struct kmem_cache_cb {
        struct list_head        kcc_list;
        kmem_cache_t *          kcc_cache;
        kmem_constructor_t      kcc_constructor;
        kmem_destructor_t       kcc_destructor;
        kmem_reclaim_t          kcc_reclaim;
        void *                  kcc_private;
        void *                  kcc_vmp;
} kmem_cache_cb_t;

static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(kmem_cache_cb_list);
static struct shrinker *kmem_cache_shrinker;

/* Function must be called while holding the kmem_cache_cb_lock.
 * Because kmem_cache_t is an opaque datatype we're forced to
 * match pointers to identify specific cache entries.
 */
static kmem_cache_cb_t *
kmem_cache_find_cache_cb(kmem_cache_t *cache)
{
        kmem_cache_cb_t *kcc;

        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
                if (cache == kcc->kcc_cache)
                        return kcc;

        return NULL;
}

static kmem_cache_cb_t *
kmem_cache_add_cache_cb(kmem_cache_t *cache,
                        kmem_constructor_t constructor,
                        kmem_destructor_t destructor,
                        kmem_reclaim_t reclaim,
                        void *priv, void *vmp)
{
        kmem_cache_cb_t *kcc;

        kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
        if (kcc) {
                kcc->kcc_cache = cache;
                kcc->kcc_constructor = constructor;
                kcc->kcc_destructor = destructor;
                kcc->kcc_reclaim = reclaim;
                kcc->kcc_private = priv;
                kcc->kcc_vmp = vmp;
                spin_lock(&kmem_cache_cb_lock);
                list_add(&kcc->kcc_list, &kmem_cache_cb_list);
                spin_unlock(&kmem_cache_cb_lock);
        }

        return kcc;
}

static void
kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
{
        if (kcc == NULL)
                return;

        spin_lock(&kmem_cache_cb_lock);
        list_del(&kcc->kcc_list);
        spin_unlock(&kmem_cache_cb_lock);

        kfree(kcc);
}

static void
kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
        kmem_cache_cb_t *kcc;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(kcc == NULL);

        kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
        spin_unlock(&kmem_cache_cb_lock);
        /* Linux constructor has no return code, silently eat it */
}

static void
kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
        kmem_cache_cb_t *kcc;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(kcc == NULL);

        /* Solaris destructor takes no flags, silently eat them */
        kcc->kcc_destructor(ptr, kcc->kcc_private);
        spin_unlock(&kmem_cache_cb_lock);
}

/* XXX - Arguments are ignored */
static int
kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
        kmem_cache_cb_t *kcc;
        int total = 0;

        /* Under linux a shrinker is not tightly coupled with a slab
         * cache.  In fact linux always systematically tries calling all
         * registered shrinker callbacks until its target reclamation level
         * is reached.  Because of this we only register one shrinker
         * function in the shim layer for all slab caches.  And we always
         * attempt to shrink all caches when this generic shrinker is called.
         */
        spin_lock(&kmem_cache_cb_lock);

        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
                /* Under linux the desired number and gfp type of objects
                 * is passed to the reclaiming function as a suggested reclaim
                 * target.  I do not pass these args on because reclaim
                 * policy is entirely up to the owner under solaris.  We only
                 * pass on the pre-registered private data. */
                if (kcc->kcc_reclaim)
                        kcc->kcc_reclaim(kcc->kcc_private);

                total += 1;
        }

        /* Under linux we should return the remaining number of entries in
         * the cache.  Unfortunately, I don't see an easy way to safely
         * emulate this behavior so I'm returning one entry per cache which
         * was registered with the generic shrinker.  This should fake out
         * the linux VM when it attempts to shrink caches.
         */
        spin_unlock(&kmem_cache_cb_lock);

        return total;
}

/* Ensure the kmem_cache_create/kmem_cache_destroy macros are
 * removed here to prevent a recursive substitution; we want to call
 * the native linux versions below.
 */
#undef kmem_cache_create
#undef kmem_cache_destroy

kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
                    kmem_constructor_t constructor,
                    kmem_destructor_t destructor,
                    kmem_reclaim_t reclaim,
                    void *priv, void *vmp, int flags)
{
        kmem_cache_t *cache;
        kmem_cache_cb_t *kcc;
        int shrinker_flag = 0;
        char *cache_name;

        /* FIXME: - Option currently unsupported by shim layer */
        BUG_ON(vmp);

        cache_name = kzalloc(strlen(name) + 1, GFP_KERNEL);
        if (cache_name == NULL)
                return NULL;

        strcpy(cache_name, name);
        cache = kmem_cache_create(cache_name, size, align, flags,
                                  kmem_cache_generic_constructor,
                                  kmem_cache_generic_destructor);
        if (cache == NULL)
                return NULL;

        /* Register shared shrinker function on initial cache create */
        spin_lock(&kmem_cache_cb_lock);
        if (list_empty(&kmem_cache_cb_list)) {
                kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
                                                   kmem_cache_generic_shrinker);
                if (kmem_cache_shrinker == NULL) {
                        kmem_cache_destroy(cache);
                        spin_unlock(&kmem_cache_cb_lock);
                        return NULL;
                }

                shrinker_flag = 1;
        }
        spin_unlock(&kmem_cache_cb_lock);

        kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
                                      reclaim, priv, vmp);
        if (kcc == NULL) {
                if (shrinker_flag) /* New shrinker registered must be removed */
                        remove_shrinker(kmem_cache_shrinker);

                kmem_cache_destroy(cache);
                return NULL;
        }

        return cache;
}
EXPORT_SYMBOL(__kmem_cache_create);

/* Return code provided despite Solaris's void return.  There should be no
 * harm here since the Solaris versions will ignore it anyway. */
int
__kmem_cache_destroy(kmem_cache_t *cache)
{
        kmem_cache_cb_t *kcc;
        char *name;
        int rc;

        spin_lock(&kmem_cache_cb_lock);
        kcc = kmem_cache_find_cache_cb(cache);
        spin_unlock(&kmem_cache_cb_lock);
        if (kcc == NULL)
                return -EINVAL;

        name = (char *)kmem_cache_name(cache);
        rc = kmem_cache_destroy(cache);
        kmem_cache_remove_cache_cb(kcc);
        kfree(name);

        /* Unregister generic shrinker on removal of all caches */
        spin_lock(&kmem_cache_cb_lock);
        if (list_empty(&kmem_cache_cb_list))
                remove_shrinker(kmem_cache_shrinker);

        spin_unlock(&kmem_cache_cb_lock);

        return rc;
}
EXPORT_SYMBOL(__kmem_cache_destroy);

void
__kmem_reap(void)
{
        /* Since there's no easy hook in to linux to force all the registered
         * shrinkers to run we just run the ones registered for this shim */
        kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
}
EXPORT_SYMBOL(__kmem_reap);
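
/*
 * Example of how a Solaris-style consumer drives this shim (sketch
 * only; my_cache, my_obj_t, and the my_* callbacks are hypothetical).
 * The kmem_cache_* macros in the shim headers forward these calls to
 * the __kmem_cache_* functions above:
 *
 *      kmem_cache_t *my_cache;
 *
 *      my_cache = kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
 *                                   my_ctor, my_dtor, my_reclaim,
 *                                   NULL, NULL, 0);
 *      ...
 *      kmem_reap();                  (maps to __kmem_reap())
 *      kmem_cache_destroy(my_cache); (maps to __kmem_cache_destroy())
 */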

int
kmem_init(void)
{
#ifdef DEBUG_KMEM
        atomic64_set(&kmem_alloc_used, 0);
        atomic64_set(&vmem_alloc_used, 0);
#endif
        return 0;
}

void
kmem_fini(void)
{
#ifdef DEBUG_KMEM
        if (atomic64_read(&kmem_alloc_used) != 0)
                printk("spl: Warning kmem leaked %ld/%ld bytes\n",
                       (long)atomic64_read(&kmem_alloc_used), kmem_alloc_max);

        if (atomic64_read(&vmem_alloc_used) != 0)
                printk("spl: Warning vmem leaked %ld/%ld bytes\n",
                       (long)atomic64_read(&vmem_alloc_used), vmem_alloc_max);
#endif
}