]> git.proxmox.com Git - mirror_spl.git/blob - module/spl/spl-vmem.c
Refactor existing code
[mirror_spl.git] / module / spl / spl-vmem.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Kmem Implementation.
25 \*****************************************************************************/
26
27 #include <sys/debug.h>
28 #include <sys/vmem.h>
29 #include <linux/module.h>
30
/*
 * Stub vmem arenas kept for Solaris API compatibility.  No arena
 * allocator is implemented in this shim, so all three remain NULL;
 * vmem_size() below asserts that callers pass a NULL arena.
 */
vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);
39
/*
 * Report the "size" of a vmem arena.  This shim has no real arenas, so
 * the arena pointer must be NULL and the typemask must request both
 * allocated and free space; the combined total (VMALLOC_TOTAL) is
 * returned unconditionally.
 */
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	ASSERT3P(vmp, ==, NULL);
	ASSERT3S(typemask & VMEM_ALLOC, ==, VMEM_ALLOC);
	ASSERT3S(typemask & VMEM_FREE, ==, VMEM_FREE);

	return (VMALLOC_TOTAL);
}
EXPORT_SYMBOL(vmem_size);
50
51 /*
52 * Memory allocation interfaces and debugging for basic kmem_*
53 * and vmem_* style memory allocation. When DEBUG_KMEM is enabled
54 * the SPL will keep track of the total memory allocated, and
55 * report any memory leaked when the module is unloaded.
56 */
57 #ifdef DEBUG_KMEM
58
59 /* Shim layer memory accounting */
60 # ifdef HAVE_ATOMIC64_T
61 atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
62 unsigned long long vmem_alloc_max = 0;
63 # else /* HAVE_ATOMIC64_T */
64 atomic_t vmem_alloc_used = ATOMIC_INIT(0);
65 unsigned long long vmem_alloc_max = 0;
66 # endif /* HAVE_ATOMIC64_T */
67
68 EXPORT_SYMBOL(vmem_alloc_used);
69 EXPORT_SYMBOL(vmem_alloc_max);
70
71 /* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
72 * but also the location of every alloc and free. When the SPL module is
73 * unloaded a list of all leaked addresses and where they were allocated
74 * will be dumped to the console. Enabling this feature has a significant
75 * impact on performance but it makes finding memory leaks straight forward.
76 *
77 * Not surprisingly with debugging enabled the xmem_locks are very highly
78 * contended particularly on xfree(). If we want to run with this detailed
79 * debugging enabled for anything other than debugging we need to minimize
80 * the contention by moving to a lock per xmem_table entry model.
81 */
82 # ifdef DEBUG_KMEM_TRACKING
83
84 # define VMEM_HASH_BITS 10
85 # define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS)
86
/*
 * Per-allocation tracking record.  One record is kmalloc()'d for every
 * outstanding tracked vmem allocation; it is linked into both the
 * vmem_table hash (lookup by address on free) and vmem_list (full dump
 * of outstanding allocations at module unload).
 */
typedef struct kmem_debug {
	struct hlist_node kd_hlist;     /* Hash node linkage */
	struct list_head kd_list;       /* List of all allocations */
	void *kd_addr;                  /* Allocation pointer */
	size_t kd_size;                 /* Allocation size */
	const char *kd_func;            /* Allocation function */
	int kd_line;                    /* Allocation line */
} kmem_debug_t;
95
96 spinlock_t vmem_lock;
97 struct hlist_head vmem_table[VMEM_TABLE_SIZE];
98 struct list_head vmem_list;
99
100 EXPORT_SYMBOL(vmem_lock);
101 EXPORT_SYMBOL(vmem_table);
102 EXPORT_SYMBOL(vmem_list);
103
104 void *
105 vmem_alloc_track(size_t size, int flags, const char *func, int line)
106 {
107 void *ptr = NULL;
108 kmem_debug_t *dptr;
109 unsigned long irq_flags;
110
111 ASSERT(flags & KM_SLEEP);
112
113 /* Function may be called with KM_NOSLEEP so failure is possible */
114 dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
115 flags & ~__GFP_ZERO);
116 if (unlikely(dptr == NULL)) {
117 printk(KERN_WARNING "debug vmem_alloc(%ld, 0x%x) "
118 "at %s:%d failed (%lld/%llu)\n",
119 sizeof(kmem_debug_t), flags, func, line,
120 vmem_alloc_used_read(), vmem_alloc_max);
121 } else {
122 /*
123 * We use __strdup() below because the string pointed to by
124 * __FUNCTION__ might not be available by the time we want
125 * to print it, since the module might have been unloaded.
126 * This can never fail because we have already asserted
127 * that flags is KM_SLEEP.
128 */
129 dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
130 if (unlikely(dptr->kd_func == NULL)) {
131 kfree(dptr);
132 printk(KERN_WARNING "debug __strdup() at %s:%d "
133 "failed (%lld/%llu)\n", func, line,
134 vmem_alloc_used_read(), vmem_alloc_max);
135 goto out;
136 }
137
138 /* Use the correct allocator */
139 if (flags & __GFP_ZERO) {
140 ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
141 } else {
142 ptr = vmalloc_nofail(size, flags);
143 }
144
145 if (unlikely(ptr == NULL)) {
146 kfree(dptr->kd_func);
147 kfree(dptr);
148 printk(KERN_WARNING "vmem_alloc (%llu, 0x%x) "
149 "at %s:%d failed (%lld/%llu)\n",
150 (unsigned long long) size, flags, func, line,
151 vmem_alloc_used_read(), vmem_alloc_max);
152 goto out;
153 }
154
155 vmem_alloc_used_add(size);
156 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
157 vmem_alloc_max = vmem_alloc_used_read();
158
159 INIT_HLIST_NODE(&dptr->kd_hlist);
160 INIT_LIST_HEAD(&dptr->kd_list);
161
162 dptr->kd_addr = ptr;
163 dptr->kd_size = size;
164 dptr->kd_line = line;
165
166 spin_lock_irqsave(&vmem_lock, irq_flags);
167 hlist_add_head(&dptr->kd_hlist,
168 &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
169 list_add_tail(&dptr->kd_list, &vmem_list);
170 spin_unlock_irqrestore(&vmem_lock, irq_flags);
171 }
172 out:
173 return (ptr);
174 }
175 EXPORT_SYMBOL(vmem_alloc_track);
176
/*
 * Tracked vmem free.  Looks up and removes the tracking record for
 * 'ptr', verifies the recorded size matches 'size', updates the byte
 * accounting, and finally releases the memory.  Both the tracking
 * record and the freed region are poisoned with 0x5a to help catch
 * use-after-free.
 */
void
vmem_free_track(const void *ptr, size_t size)
{
	kmem_debug_t *dptr;

	/*
	 * NOTE(review): the '||' only rejects the fully-degenerate
	 * NULL-with-zero-size call; a NULL ptr with size > 0 passes.
	 * Confirm whether '&&' was intended before tightening.
	 */
	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	/* Must exist in hash due to vmem_alloc() */
	dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
	ASSERT(dptr);

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	vmem_alloc_used_sub(size);
	kfree(dptr->kd_func);

	/* Poison and free the tracking record */
	memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	/* Poison and free the allocation itself */
	memset((void *)ptr, 0x5a, size);
	vfree(ptr);
}
EXPORT_SYMBOL(vmem_free_track);
204
205 # else /* DEBUG_KMEM_TRACKING */
206
207 void *
208 vmem_alloc_debug(size_t size, int flags, const char *func, int line)
209 {
210 void *ptr;
211
212 ASSERT(flags & KM_SLEEP);
213
214 /* Use the correct allocator */
215 if (flags & __GFP_ZERO) {
216 ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
217 } else {
218 ptr = vmalloc_nofail(size, flags);
219 }
220
221 if (unlikely(ptr == NULL)) {
222 printk(KERN_WARNING
223 "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
224 (unsigned long long)size, flags, func, line,
225 (unsigned long long)vmem_alloc_used_read(), vmem_alloc_max);
226 } else {
227 vmem_alloc_used_add(size);
228 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
229 vmem_alloc_max = vmem_alloc_used_read();
230 }
231
232 return (ptr);
233 }
234 EXPORT_SYMBOL(vmem_alloc_debug);
235
236 void
237 vmem_free_debug(const void *ptr, size_t size)
238 {
239 ASSERT(ptr || size > 0);
240 vmem_alloc_used_sub(size);
241 vfree(ptr);
242 }
243 EXPORT_SYMBOL(vmem_free_debug);
244
245 # endif /* DEBUG_KMEM_TRACKING */
246 #endif /* DEBUG_KMEM */
247
248 #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
249 static char *
250 spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
251 {
252 int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
253 int i, flag = 1;
254
255 ASSERT(str != NULL && len >= 17);
256 memset(str, 0, len);
257
258 /* Check for a fully printable string, and while we are at
259 * it place the printable characters in the passed buffer. */
260 for (i = 0; i < size; i++) {
261 str[i] = ((char *)(kd->kd_addr))[i];
262 if (isprint(str[i])) {
263 continue;
264 } else {
265 /* Minimum number of printable characters found
266 * to make it worthwhile to print this as ascii. */
267 if (i > min)
268 break;
269
270 flag = 0;
271 break;
272 }
273 }
274
275 if (!flag) {
276 sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
277 *((uint8_t *)kd->kd_addr),
278 *((uint8_t *)kd->kd_addr + 2),
279 *((uint8_t *)kd->kd_addr + 4),
280 *((uint8_t *)kd->kd_addr + 6),
281 *((uint8_t *)kd->kd_addr + 8),
282 *((uint8_t *)kd->kd_addr + 10),
283 *((uint8_t *)kd->kd_addr + 12),
284 *((uint8_t *)kd->kd_addr + 14));
285 }
286
287 return str;
288 }
289
290 static int
291 spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
292 {
293 int i;
294
295 spin_lock_init(lock);
296 INIT_LIST_HEAD(list);
297
298 for (i = 0; i < size; i++)
299 INIT_HLIST_HEAD(&kmem_table[i]);
300
301 return (0);
302 }
303
/*
 * Dump every allocation still present on 'list' to the console; called
 * at module unload to report leaks.  Each line shows the address, the
 * size, a printable/hex preview of the data, and the allocating
 * func:line.  The lock is held across the walk so records cannot be
 * removed mid-dump.
 */
static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];

	spin_lock_irqsave(lock, flags);
	/* Only print the column header when there is something to report */
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
}
323 #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
324 #define spl_kmem_init_tracking(list, lock, size)
325 #define spl_kmem_fini_tracking(list, lock)
326 #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
327
/*
 * Module initialization: reset the vmem byte accounting and set up the
 * leak-tracking structures when kmem debugging is compiled in.
 * Always succeeds.
 */
int
spl_vmem_init(void)
{
#ifdef DEBUG_KMEM
	vmem_alloc_used_set(0);
	spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif

	return (0);
}
340
/*
 * Module teardown: report the total of any leaked vmem bytes and dump
 * the per-allocation tracking records to the console.
 *
 * Fix: print vmem_alloc_used_read() with %lld, matching every other
 * printk of this value in the file; the previous %ld mismatched the
 * 64-bit accounting counter's width.
 */
void
spl_vmem_fini(void)
{
#ifdef DEBUG_KMEM
	/* Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging. Performance is not
	 * a serious concern here since it is module unload time. */
	if (vmem_alloc_used_read() != 0)
		printk(KERN_WARNING "vmem leaked %lld/%llu bytes\n",
		    vmem_alloc_used_read(), vmem_alloc_max);

	spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
}