/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/debug.h>
#include <sys/vmem.h>
#include <linux/module.h>

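/*
 * These Solaris arena handles are exported only as NULL placeholders;
 * the Linux port does not implement real vmem arenas, so consumers
 * must tolerate a NULL arena pointer.
 */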
vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);

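/*
 * With no real arena to inspect, vmem_size() can only report the total
 * vmalloc address space; the asserts restrict callers to the NULL arena
 * and to the full VMEM_ALLOC | VMEM_FREE type mask.
 */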
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	ASSERT3P(vmp, ==, NULL);
	ASSERT3S(typemask & VMEM_ALLOC, ==, VMEM_ALLOC);
	ASSERT3S(typemask & VMEM_FREE, ==, VMEM_FREE);

	return (VMALLOC_TOTAL);
}
EXPORT_SYMBOL(vmem_size);

/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * the SPL will keep track of the total memory allocated, and
 * report any memory leaked when the module is unloaded.
 */
#ifdef DEBUG_KMEM

/*
 * Shim layer memory accounting: vmem_alloc_used tracks the bytes
 * currently outstanding and vmem_alloc_max records the high-water
 * mark.  A 32-bit atomic_t is used when the arch lacks atomic64_t.
 */
#ifdef HAVE_ATOMIC64_T
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
#else /* HAVE_ATOMIC64_T */
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
#endif /* HAVE_ATOMIC64_T */

EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);

/*
 * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straightforward.
 *
 * Not surprisingly, with debugging enabled the xmem_locks are very highly
 * contended, particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 */
#ifdef DEBUG_KMEM_TRACKING

#define	VMEM_HASH_BITS		10
#define	VMEM_TABLE_SIZE		(1 << VMEM_HASH_BITS)

typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);

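/*
 * Each tracked allocation is wrapped in a kmem_debug_t which is linked
 * into both a hash table keyed on the returned address, for constant
 * time lookup at free, and a global list which is walked at module
 * unload to report leaks.
 */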
void *
vmem_alloc_track(size_t size, int flags, const char *func, int line)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;

	ASSERT(flags & KM_SLEEP);

	/* Failure is unexpected with KM_SLEEP (asserted above) but handled */
	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof (kmem_debug_t),
	    flags & ~__GFP_ZERO);
	if (unlikely(dptr == NULL)) {
		printk(KERN_WARNING "debug vmem_alloc(%ld, 0x%x) "
		    "at %s:%d failed (%lld/%llu)\n",
		    sizeof (kmem_debug_t), flags, func, line,
		    vmem_alloc_used_read(), vmem_alloc_max);
	} else {
		/*
		 * We use __strdup() below because the string pointed to by
		 * __FUNCTION__ might not be available by the time we want
		 * to print it, since the module might have been unloaded.
		 * This can never fail because we have already asserted
		 * that flags is KM_SLEEP.
		 */
		dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
		if (unlikely(dptr->kd_func == NULL)) {
			kfree(dptr);
			printk(KERN_WARNING "debug __strdup() at %s:%d "
			    "failed (%lld/%llu)\n", func, line,
			    vmem_alloc_used_read(), vmem_alloc_max);
			goto out;
		}

		/* Use the correct allocator */
		if (flags & __GFP_ZERO) {
			ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
		} else {
			ptr = vmalloc_nofail(size, flags);
		}

		if (unlikely(ptr == NULL)) {
			kfree(dptr->kd_func);
			kfree(dptr);
			printk(KERN_WARNING "vmem_alloc (%llu, 0x%x) "
			    "at %s:%d failed (%lld/%llu)\n",
			    (unsigned long long) size, flags, func, line,
			    vmem_alloc_used_read(), vmem_alloc_max);
			goto out;
		}

		vmem_alloc_used_add(size);
		if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
			vmem_alloc_max = vmem_alloc_used_read();

		INIT_HLIST_NODE(&dptr->kd_hlist);
		INIT_LIST_HEAD(&dptr->kd_list);

		dptr->kd_addr = ptr;
		dptr->kd_size = size;
		dptr->kd_line = line;

		spin_lock_irqsave(&vmem_lock, irq_flags);
		hlist_add_head(&dptr->kd_hlist,
		    &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
		list_add_tail(&dptr->kd_list, &vmem_list);
		spin_unlock_irqrestore(&vmem_lock, irq_flags);
	}
out:
	return (ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);

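/*
 * Both the tracking structure and the freed region itself are filled
 * with the 0x5a poison pattern before being released, making stale
 * pointer use easier to spot.
 */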
void
vmem_free_track(const void *ptr, size_t size)
{
	kmem_debug_t *dptr;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	/* Must exist in hash due to vmem_alloc() */
	dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
	ASSERT(dptr);

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	vmem_alloc_used_sub(size);
	kfree(dptr->kd_func);

	memset((void *)dptr, 0x5a, sizeof (kmem_debug_t));
	kfree(dptr);

	memset((void *)ptr, 0x5a, size);
	vfree(ptr);
}
EXPORT_SYMBOL(vmem_free_track);

#else /* DEBUG_KMEM_TRACKING */

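/*
 * Lightweight debug path: only the aggregate byte count and high-water
 * mark are maintained, individual allocations are not tracked.
 */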
void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
	void *ptr;

	ASSERT(flags & KM_SLEEP);

	/* Use the correct allocator */
	if (flags & __GFP_ZERO) {
		ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
	} else {
		ptr = vmalloc_nofail(size, flags);
	}

	if (unlikely(ptr == NULL)) {
		printk(KERN_WARNING
		    "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    (unsigned long long)size, flags, func, line,
		    (unsigned long long)vmem_alloc_used_read(), vmem_alloc_max);
	} else {
		vmem_alloc_used_add(size);
		if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
			vmem_alloc_max = vmem_alloc_used_read();
	}

	return (ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);

void
vmem_free_debug(const void *ptr, size_t size)
{
	ASSERT(ptr || size > 0);
	vmem_alloc_used_sub(size);
	vfree(ptr);
}
EXPORT_SYMBOL(vmem_free_debug);

#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

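/*
 * Render a preview of a leaked allocation, either as a printable
 * string or as a hex dump of the leading bytes, for the leak report.
 */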
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/*
	 * Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer.
	 */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/*
			 * Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii.
			 */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

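	/*
	 * Not printable: dump hex instead.  Every other byte of the
	 * first 16 is printed so the eight two-character values fit
	 * the caller's 17 byte buffer.
	 */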
	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		    *((uint8_t *)kd->kd_addr),
		    *((uint8_t *)kd->kd_addr + 2),
		    *((uint8_t *)kd->kd_addr + 4),
		    *((uint8_t *)kd->kd_addr + 6),
		    *((uint8_t *)kd->kd_addr + 8),
		    *((uint8_t *)kd->kd_addr + 10),
		    *((uint8_t *)kd->kd_addr + 12),
		    *((uint8_t *)kd->kd_addr + 14));
	}

	return (str);
}

static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&vmem_table[i]);

	return (0);
}

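/*
 * Walk the list of outstanding allocations and print one line per
 * leak: address, size, a short data preview and the allocation site.
 */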
static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define	spl_kmem_init_tracking(list, lock, size)
#define	spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */

int
spl_vmem_init(void)
{
	int rc = 0;

#ifdef DEBUG_KMEM
	vmem_alloc_used_set(0);
	spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif

	return (rc);
}

void
spl_vmem_fini(void)
{
#ifdef DEBUG_KMEM
	/*
	 * Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time.
	 */
	if (vmem_alloc_used_read() != 0)
		printk(KERN_WARNING "vmem leaked %lld/%llu bytes\n",
		    vmem_alloc_used_read(), vmem_alloc_max);

	spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
}