#include <sys/linux-kmem.h>

/*
 * Memory allocation interfaces
 */
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
atomic_t kmem_alloc_used;
unsigned int kmem_alloc_max;
#endif

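/*
 * Illustrative sketch only (not part of this file): the allocation
 * wrappers, presumably provided by sys/linux-kmem.h, are assumed to
 * update the accounting counters above roughly as follows.  The helper
 * name kmem_alloc_account() is hypothetical.
 */
#if 0
static inline void *
kmem_alloc_account(size_t size, unsigned int gfp_flags)
{
        void *ptr = kmalloc(size, gfp_flags);

        if (ptr) {
                /* Track current and peak shim allocations */
                atomic_add(size, &kmem_alloc_used);
                if (atomic_read(&kmem_alloc_used) > kmem_alloc_max)
                        kmem_alloc_max = atomic_read(&kmem_alloc_used);
        }

        return ptr;
}
#endif
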
/*
 * Slab allocation interfaces
 *
 * While the linux slab implementation was inspired by solaris, they
 * have made some changes to the API which complicate this shim
 * layer.  For one thing the same symbol names are used with different
 * arguments for the prototypes.  To deal with this we must use the
 * preprocessor to re-order arguments.  Happily for us standard C says,
 * "Macros appearing in their own expansion are not re-expanded", so
 * this does not result in infinite recursion.  Additionally the
 * function pointers registered by solaris consumers differ from those
 * used by linux, so a lookup and mapping from a linux style callback
 * to a solaris style callback is needed.  There is some overhead in
 * this operation which isn't horrible, but it needs to be kept in mind.
 */
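/*
 * Illustrative sketch only: the argument re-ordering described above is
 * assumed to work roughly like the hypothetical example below.  A macro
 * with the shared symbol name expands to a call of that same name with
 * the arguments rearranged; since a macro is not re-expanded inside its
 * own expansion, the second occurrence resolves to the real linux
 * function.  The actual macros live in sys/linux-kmem.h and may differ.
 */
#if 0
/* solaris: foo(buf, flags)   linux: foo(flags, buf) -- hypothetical */
#define foo(buf, flags)         foo(flags, buf)
#endif
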
typedef struct kmem_cache_cb {
        struct list_head    kcc_list;
        kmem_cache_t *      kcc_cache;
        kmem_constructor_t  kcc_constructor;
        kmem_destructor_t   kcc_destructor;
        kmem_reclaim_t      kcc_reclaim;
        void *              kcc_private;
        void *              kcc_vmp;
} kmem_cache_cb_t;

static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
static LIST_HEAD(kmem_cache_cb_list);
static struct shrinker *kmem_cache_shrinker;

/* Function must be called while holding the kmem_cache_cb_lock.
 * Because kmem_cache_t is an opaque datatype we're forced to
 * match pointers to identify specific cache entries.
 */
static kmem_cache_cb_t *
kmem_cache_find_cache_cb(kmem_cache_t *cache)
{
        kmem_cache_cb_t *kcc;

        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
                if (cache == kcc->kcc_cache)
                        return kcc;

        return NULL;
}

static kmem_cache_cb_t *
kmem_cache_add_cache_cb(kmem_cache_t *cache,
                        kmem_constructor_t constructor,
                        kmem_destructor_t destructor,
                        kmem_reclaim_t reclaim,
                        void *priv, void *vmp)
{
        kmem_cache_cb_t *kcc;

        kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
        if (kcc) {
                kcc->kcc_cache = cache;
                kcc->kcc_constructor = constructor;
                kcc->kcc_destructor = destructor;
                kcc->kcc_reclaim = reclaim;
                kcc->kcc_private = priv;
                kcc->kcc_vmp = vmp;
                spin_lock(&kmem_cache_cb_lock);
                list_add(&kcc->kcc_list, &kmem_cache_cb_list);
                spin_unlock(&kmem_cache_cb_lock);
        }

        return kcc;
}

static void
kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
{
        if (kcc == NULL)
                return;

        spin_lock(&kmem_cache_cb_lock);
        list_del(&kcc->kcc_list);
        spin_unlock(&kmem_cache_cb_lock);

        kfree(kcc);
}

static void
kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
        kmem_cache_cb_t *kcc;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(!kcc);

        /* Linux constructors have no return code, so the Solaris
         * constructor's return value is silently discarded */
        kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
        spin_unlock(&kmem_cache_cb_lock);
}

static void
kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
        kmem_cache_cb_t *kcc;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(!kcc);

        /* Solaris destructor takes no flags, silently eat them */
        kcc->kcc_destructor(ptr, kcc->kcc_private);
        spin_unlock(&kmem_cache_cb_lock);
}

/* XXX - Arguments are ignored */
static int
kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
        kmem_cache_cb_t *kcc;
        int total = 0;

        /* Under linux a shrinker is not tightly coupled with a slab
         * cache.  In fact linux always systematically tries calling all
         * registered shrinker callbacks until its target reclamation level
         * is reached.  Because of this we only register one shrinker
         * function in the shim layer for all slab caches, and we always
         * attempt to shrink all caches when this generic shrinker is called.
         */
        spin_lock(&kmem_cache_cb_lock);

        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
                /* Under linux the desired number and gfp type of objects
                 * is passed to the reclaiming function as a suggested reclaim
                 * target.  I do not pass these args on because reclaim
                 * policy is entirely up to the owner under solaris.  We only
                 * pass on the pre-registered private data.
                 */
                if (kcc->kcc_reclaim)
                        kcc->kcc_reclaim(kcc->kcc_private);

                total += 1;
        }

        /* Under linux we should return the remaining number of entries in
         * the cache.  Unfortunately, I don't see an easy way to safely
         * emulate this behavior so I'm returning one entry per cache which
         * was registered with the generic shrinker.  This should fake out
         * the linux VM when it attempts to shrink caches.
         */
        spin_unlock(&kmem_cache_cb_lock);
        return total;
}

/* Ensure the kmem_cache_create/kmem_cache_destroy macros are removed
 * here to prevent a recursive substitution; below we want to call the
 * native linux versions.
 */
#undef kmem_cache_create
#undef kmem_cache_destroy

kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
                    int (*constructor)(void *, void *, int),
                    void (*destructor)(void *, void *),
                    void (*reclaim)(void *),
                    void *priv, void *vmp, int flags)
{
        kmem_cache_t *cache;
        kmem_cache_cb_t *kcc;
        int shrinker_flag = 0;

        /* FIXME: - Option currently unsupported by shim layer */
        BUG_ON(vmp);

        cache = kmem_cache_create(name, size, align, flags,
                                  kmem_cache_generic_constructor,
                                  kmem_cache_generic_destructor);
        if (cache == NULL)
                return NULL;

        /* Register shared shrinker function on initial cache create */
        spin_lock(&kmem_cache_cb_lock);
        if (list_empty(&kmem_cache_cb_list)) {
                kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
                                                   kmem_cache_generic_shrinker);
                if (kmem_cache_shrinker == NULL) {
                        spin_unlock(&kmem_cache_cb_lock);
                        kmem_cache_destroy(cache);
                        return NULL;
                }

                shrinker_flag = 1;
        }
        spin_unlock(&kmem_cache_cb_lock);

        kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
                                      reclaim, priv, vmp);
        if (kcc == NULL) {
                if (shrinker_flag) /* Newly registered shrinker must be removed */
                        remove_shrinker(kmem_cache_shrinker);

                kmem_cache_destroy(cache);
                return NULL;
        }

        return cache;
}

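/*
 * Illustrative usage sketch only: a solaris style consumer of this shim
 * would be expected to create, use, and destroy a cache roughly as
 * follows.  The object type my_obj_t and the callback my_obj_constructor
 * are hypothetical names, not part of the shim.
 */
#if 0
typedef struct my_obj { int mo_busy; } my_obj_t;

static int
my_obj_constructor(void *buf, void *priv, int kmflags)
{
        memset(buf, 0, sizeof(my_obj_t));
        return 0;
}

static void
my_obj_example(void)
{
        kmem_cache_t *cp;
        my_obj_t *obj;

        cp = __kmem_cache_create((char *)"my_obj_cache", sizeof(my_obj_t), 0,
                                 my_obj_constructor, NULL, NULL,
                                 NULL, NULL, 0);
        obj = kmem_cache_alloc(cp, GFP_KERNEL);
        kmem_cache_free(cp, obj);
        __kmem_cache_destroy(cp);
}
#endif
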
/* Return codes discarded because Solaris implementation has void return */
void
__kmem_cache_destroy(kmem_cache_t *cache)
{
        kmem_cache_cb_t *kcc;

        spin_lock(&kmem_cache_cb_lock);
        kcc = kmem_cache_find_cache_cb(cache);
        spin_unlock(&kmem_cache_cb_lock);
        if (kcc == NULL)
                return;

        kmem_cache_destroy(cache);
        kmem_cache_remove_cache_cb(kcc);

        /* Unregister generic shrinker on removal of all caches */
        spin_lock(&kmem_cache_cb_lock);
        if (list_empty(&kmem_cache_cb_list))
                remove_shrinker(kmem_cache_shrinker);

        spin_unlock(&kmem_cache_cb_lock);
}

void
__kmem_reap(void)
{
        /* Since there's no easy hook into linux to force all the registered
         * shrinkers to run, we just run the one registered for this shim */
        kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
}