#include <sys/linux-kmem.h>

/*
 * Memory allocation interfaces
 */

/* Shim layer memory accounting */
/* Running total of bytes allocated through the shim — NOTE(review):
 * presumably updated by the allocation wrappers; updaters are not
 * visible in this chunk, confirm against the rest of the file. */
atomic_t kmem_alloc_used;
/* High-water mark of kmem_alloc_used — TODO confirm where this is set. */
unsigned int kmem_alloc_max;
/*
 * Slab allocation interfaces
 *
 * While the linux slab implementation was inspired by solaris they
 * have made some changes to the API which complicate this shim
 * layer. For one thing the same symbol names are used with different
 * arguments for the prototypes. To deal with this we must use the
 * preprocessor to re-order arguments. Happily for us standard C says,
 * "Macros appearing in their own expansion are not reexpanded" so
 * this does not result in infinite recursion. Additionally the
 * function pointers registered by solaris differ from those used
 * by linux so a lookup and mapping from a linux style callback to a
 * solaris style callback is needed. There is some overhead in this
 * operation which isn't horrible but it needs to be kept in mind.
 */
27 typedef struct kmem_cache_cb
{
28 struct list_head kcc_list
;
29 kmem_cache_t
* kcc_cache
;
30 kmem_constructor_t kcc_constructor
;
31 kmem_destructor_t kcc_destructor
;
32 kmem_reclaim_t kcc_reclaim
;
/* Protects kmem_cache_cb_list and the callback entries on it. */
static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
/* All registered kmem_cache_cb_t entries, one per created cache. */
static LIST_HEAD(kmem_cache_cb_list);
/* The single shared shrinker registered for every shim cache
 * (see kmem_cache_generic_shrinker below). */
static struct shrinker *kmem_cache_shrinker;
43 /* Function must be called while holding the kmem_cache_cb_lock
44 * Because kmem_cache_t is an opaque datatype we're forced to
45 * match pointers to identify specific cache entires.
47 static kmem_cache_cb_t
*
48 kmem_cache_find_cache_cb(kmem_cache_t
*cache
)
52 list_for_each_entry(kcc
, &kmem_cache_cb_list
, kcc_list
)
53 if (cache
== kcc
->kcc_cache
)
59 static kmem_cache_cb_t
*
60 kmem_cache_add_cache_cb(kmem_cache_t
*cache
,
61 kmem_constructor_t constructor
,
62 kmem_destructor_t destructor
,
63 kmem_reclaim_t reclaim
,
64 void *priv
, void *vmp
)
68 kcc
= (kmem_cache_cb_t
*)kmalloc(sizeof(*kcc
), GFP_KERNEL
);
70 kcc
->kcc_cache
= cache
;
71 kcc
->kcc_constructor
= constructor
;
72 kcc
->kcc_destructor
= destructor
;
73 kcc
->kcc_reclaim
= reclaim
;
74 kcc
->kcc_private
= priv
;
76 spin_lock(&kmem_cache_cb_lock
);
77 list_add(&kcc
->kcc_list
, &kmem_cache_cb_list
);
78 spin_unlock(&kmem_cache_cb_lock
);
85 kmem_cache_remove_cache_cb(kmem_cache_cb_t
*kcc
)
87 spin_lock(&kmem_cache_cb_lock
);
88 list_del(&kcc
->kcc_list
);
89 spin_unlock(&kmem_cache_cb_lock
);
96 kmem_cache_generic_constructor(void *ptr
, kmem_cache_t
*cache
, unsigned long flags
)
100 spin_lock(&kmem_cache_cb_lock
);
102 /* Callback list must be in sync with linux slab caches */
103 kcc
= kmem_cache_find_cache_cb(cache
);
106 kcc
->kcc_constructor(ptr
, kcc
->kcc_private
, (int)flags
);
107 spin_unlock(&kmem_cache_cb_lock
);
108 /* Linux constructor has no return code, silently eat it */
112 kmem_cache_generic_destructor(void *ptr
, kmem_cache_t
*cache
, unsigned long flags
)
114 kmem_cache_cb_t
*kcc
;
116 spin_lock(&kmem_cache_cb_lock
);
118 /* Callback list must be in sync with linux slab caches */
119 kcc
= kmem_cache_find_cache_cb(cache
);
122 /* Solaris destructor takes no flags, silently eat them */
123 kcc
->kcc_destructor(ptr
, kcc
->kcc_private
);
124 spin_unlock(&kmem_cache_cb_lock
);
127 /* XXX - Arguments are ignored */
129 kmem_cache_generic_shrinker(int nr_to_scan
, unsigned int gfp_mask
)
131 kmem_cache_cb_t
*kcc
;
134 /* Under linux a shrinker is not tightly coupled with a slab
135 * cache. In fact linux always systematically trys calling all
136 * registered shrinker callbacks until its target reclamation level
137 * is reached. Because of this we only register one shrinker
138 * function in the shim layer for all slab caches. And we always
139 * attempt to shrink all caches when this generic shrinker is called.
141 spin_lock(&kmem_cache_cb_lock
);
143 list_for_each_entry(kcc
, &kmem_cache_cb_list
, kcc_list
) {
144 /* Under linux the desired number and gfp type of objects
145 * is passed to the reclaiming function as a sugested reclaim
146 * target. I do not pass these args on because reclaim
147 * policy is entirely up to the owner under solaris. We only
148 * pass on the pre-registered private data.
150 if (kcc
->kcc_reclaim
)
151 kcc
->kcc_reclaim(kcc
->kcc_private
);
156 /* Under linux we should return the remaining number of entires in
157 * the cache. Unfortunately, I don't see an easy way to safely
158 * emulate this behavior so I'm returning one entry per cache which
159 * was registered with the generic shrinker. This should fake out
160 * the linux VM when it attempts to shrink caches.
162 spin_unlock(&kmem_cache_cb_lock
);
166 /* Ensure the __kmem_cache_create/__kmem_cache_destroy macros are
167 * removed here to prevent a recursive substitution, we want to call
168 * the native linux version.
170 #undef kmem_cache_create
171 #undef kmem_cache_destroy
174 __kmem_cache_create(char *name
, size_t size
, size_t align
,
175 int (*constructor
)(void *, void *, int),
176 void (*destructor
)(void *, void *),
177 void (*reclaim
)(void *),
178 void *priv
, void *vmp
, int flags
)
181 kmem_cache_cb_t
*kcc
;
182 int shrinker_flag
= 0;
184 /* FIXME: - Option currently unsupported by shim layer */
187 cache
= kmem_cache_create(name
, size
, align
, flags
,
188 kmem_cache_generic_constructor
,
189 kmem_cache_generic_destructor
);
193 /* Register shared shrinker function on initial cache create */
194 spin_lock(&kmem_cache_cb_lock
);
195 if (list_empty(&kmem_cache_cb_list
)) {
196 kmem_cache_shrinker
= set_shrinker(KMC_DEFAULT_SEEKS
,
197 kmem_cache_generic_shrinker
);
198 if (kmem_cache_shrinker
== NULL
) {
199 kmem_cache_destroy(cache
);
200 spin_unlock(&kmem_cache_cb_lock
);
205 spin_unlock(&kmem_cache_cb_lock
);
207 kcc
= kmem_cache_add_cache_cb(cache
, constructor
, destructor
,
210 if (shrinker_flag
) /* New shrinker registered must be removed */
211 remove_shrinker(kmem_cache_shrinker
);
213 kmem_cache_destroy(cache
);
220 /* Return codes discarded because Solaris implementation has void return */
222 __kmem_cache_destroy(kmem_cache_t
*cache
)
224 kmem_cache_cb_t
*kcc
;
226 spin_lock(&kmem_cache_cb_lock
);
227 kcc
= kmem_cache_find_cache_cb(cache
);
228 spin_unlock(&kmem_cache_cb_lock
);
232 kmem_cache_destroy(cache
);
233 kmem_cache_remove_cache_cb(kcc
);
235 /* Unregister generic shrinker on removal of all caches */
236 spin_lock(&kmem_cache_cb_lock
);
237 if (list_empty(&kmem_cache_cb_list
))
238 remove_shrinker(kmem_cache_shrinker
);
240 spin_unlock(&kmem_cache_cb_lock
);
/* Since there's no easy hook into linux to force all the registered
 * shrinkers to run, we just run the one registered for this shim */
247 kmem_cache_generic_shrinker(KMC_REAP_CHUNK
, GFP_KERNEL
);