1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Kmem Implementation.
25 \*****************************************************************************/
27 #include <sys/debug.h>
29 #include <linux/module.h>
31 vmem_t
*heap_arena
= NULL
;
32 EXPORT_SYMBOL(heap_arena
);
34 vmem_t
*zio_alloc_arena
= NULL
;
35 EXPORT_SYMBOL(zio_alloc_arena
);
37 vmem_t
*zio_arena
= NULL
;
38 EXPORT_SYMBOL(zio_arena
);
41 vmem_size(vmem_t
*vmp
, int typemask
)
43 ASSERT3P(vmp
, ==, NULL
);
44 ASSERT3S(typemask
& VMEM_ALLOC
, ==, VMEM_ALLOC
);
45 ASSERT3S(typemask
& VMEM_FREE
, ==, VMEM_FREE
);
47 return (VMALLOC_TOTAL
);
49 EXPORT_SYMBOL(vmem_size
);
/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * the SPL will keep track of the total memory allocated, and
 * report any memory leaked when the module is unloaded.
 */
59 /* Shim layer memory accounting */
60 # ifdef HAVE_ATOMIC64_T
61 atomic64_t vmem_alloc_used
= ATOMIC64_INIT(0);
62 unsigned long long vmem_alloc_max
= 0;
63 # else /* HAVE_ATOMIC64_T */
64 atomic_t vmem_alloc_used
= ATOMIC_INIT(0);
65 unsigned long long vmem_alloc_max
= 0;
66 # endif /* HAVE_ATOMIC64_T */
68 EXPORT_SYMBOL(vmem_alloc_used
);
69 EXPORT_SYMBOL(vmem_alloc_max
);
/*
 * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straight forward.
 *
 * Not surprisingly with debugging enabled the xmem_locks are very highly
 * contended particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 */
82 # ifdef DEBUG_KMEM_TRACKING
84 # define VMEM_HASH_BITS 10
85 # define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS)
87 typedef struct kmem_debug
{
88 struct hlist_node kd_hlist
; /* Hash node linkage */
89 struct list_head kd_list
; /* List of all allocations */
90 void *kd_addr
; /* Allocation pointer */
91 size_t kd_size
; /* Allocation size */
92 const char *kd_func
; /* Allocation function */
93 int kd_line
; /* Allocation line */
97 struct hlist_head vmem_table
[VMEM_TABLE_SIZE
];
98 struct list_head vmem_list
;
100 EXPORT_SYMBOL(vmem_lock
);
101 EXPORT_SYMBOL(vmem_table
);
102 EXPORT_SYMBOL(vmem_list
);
105 vmem_alloc_track(size_t size
, int flags
, const char *func
, int line
)
109 unsigned long irq_flags
;
111 ASSERT(flags
& KM_SLEEP
);
113 /* Function may be called with KM_NOSLEEP so failure is possible */
114 dptr
= (kmem_debug_t
*) kmalloc_nofail(sizeof(kmem_debug_t
),
115 flags
& ~__GFP_ZERO
);
116 if (unlikely(dptr
== NULL
)) {
117 printk(KERN_WARNING
"debug vmem_alloc(%ld, 0x%x) "
118 "at %s:%d failed (%lld/%llu)\n",
119 sizeof(kmem_debug_t
), flags
, func
, line
,
120 vmem_alloc_used_read(), vmem_alloc_max
);
123 * We use __strdup() below because the string pointed to by
124 * __FUNCTION__ might not be available by the time we want
125 * to print it, since the module might have been unloaded.
126 * This can never fail because we have already asserted
127 * that flags is KM_SLEEP.
129 dptr
->kd_func
= __strdup(func
, flags
& ~__GFP_ZERO
);
130 if (unlikely(dptr
->kd_func
== NULL
)) {
132 printk(KERN_WARNING
"debug __strdup() at %s:%d "
133 "failed (%lld/%llu)\n", func
, line
,
134 vmem_alloc_used_read(), vmem_alloc_max
);
138 /* Use the correct allocator */
139 if (flags
& __GFP_ZERO
) {
140 ptr
= vzalloc_nofail(size
, flags
& ~__GFP_ZERO
);
142 ptr
= vmalloc_nofail(size
, flags
);
145 if (unlikely(ptr
== NULL
)) {
146 kfree(dptr
->kd_func
);
148 printk(KERN_WARNING
"vmem_alloc (%llu, 0x%x) "
149 "at %s:%d failed (%lld/%llu)\n",
150 (unsigned long long) size
, flags
, func
, line
,
151 vmem_alloc_used_read(), vmem_alloc_max
);
155 vmem_alloc_used_add(size
);
156 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max
))
157 vmem_alloc_max
= vmem_alloc_used_read();
159 INIT_HLIST_NODE(&dptr
->kd_hlist
);
160 INIT_LIST_HEAD(&dptr
->kd_list
);
163 dptr
->kd_size
= size
;
164 dptr
->kd_line
= line
;
166 spin_lock_irqsave(&vmem_lock
, irq_flags
);
167 hlist_add_head(&dptr
->kd_hlist
,
168 &vmem_table
[hash_ptr(ptr
, VMEM_HASH_BITS
)]);
169 list_add_tail(&dptr
->kd_list
, &vmem_list
);
170 spin_unlock_irqrestore(&vmem_lock
, irq_flags
);
175 EXPORT_SYMBOL(vmem_alloc_track
);
178 vmem_free_track(const void *ptr
, size_t size
)
182 ASSERTF(ptr
|| size
> 0, "ptr: %p, size: %llu", ptr
,
183 (unsigned long long) size
);
185 /* Must exist in hash due to vmem_alloc() */
186 dptr
= kmem_del_init(&vmem_lock
, vmem_table
, VMEM_HASH_BITS
, ptr
);
189 /* Size must match */
190 ASSERTF(dptr
->kd_size
== size
, "kd_size (%llu) != size (%llu), "
191 "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr
->kd_size
,
192 (unsigned long long) size
, dptr
->kd_func
, dptr
->kd_line
);
194 vmem_alloc_used_sub(size
);
195 kfree(dptr
->kd_func
);
197 memset((void *)dptr
, 0x5a, sizeof(kmem_debug_t
));
200 memset((void *)ptr
, 0x5a, size
);
203 EXPORT_SYMBOL(vmem_free_track
);
205 # else /* DEBUG_KMEM_TRACKING */
208 vmem_alloc_debug(size_t size
, int flags
, const char *func
, int line
)
212 ASSERT(flags
& KM_SLEEP
);
214 /* Use the correct allocator */
215 if (flags
& __GFP_ZERO
) {
216 ptr
= vzalloc_nofail(size
, flags
& (~__GFP_ZERO
));
218 ptr
= vmalloc_nofail(size
, flags
);
221 if (unlikely(ptr
== NULL
)) {
223 "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
224 (unsigned long long)size
, flags
, func
, line
,
225 (unsigned long long)vmem_alloc_used_read(), vmem_alloc_max
);
227 vmem_alloc_used_add(size
);
228 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max
))
229 vmem_alloc_max
= vmem_alloc_used_read();
234 EXPORT_SYMBOL(vmem_alloc_debug
);
237 vmem_free_debug(const void *ptr
, size_t size
)
239 ASSERT(ptr
|| size
> 0);
240 vmem_alloc_used_sub(size
);
243 EXPORT_SYMBOL(vmem_free_debug
);
245 # endif /* DEBUG_KMEM_TRACKING */
246 #endif /* DEBUG_KMEM */
248 #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
250 spl_sprintf_addr(kmem_debug_t
*kd
, char *str
, int len
, int min
)
252 int size
= ((len
- 1) < kd
->kd_size
) ? (len
- 1) : kd
->kd_size
;
255 ASSERT(str
!= NULL
&& len
>= 17);
258 /* Check for a fully printable string, and while we are at
259 * it place the printable characters in the passed buffer. */
260 for (i
= 0; i
< size
; i
++) {
261 str
[i
] = ((char *)(kd
->kd_addr
))[i
];
262 if (isprint(str
[i
])) {
265 /* Minimum number of printable characters found
266 * to make it worthwhile to print this as ascii. */
276 sprintf(str
, "%02x%02x%02x%02x%02x%02x%02x%02x",
277 *((uint8_t *)kd
->kd_addr
),
278 *((uint8_t *)kd
->kd_addr
+ 2),
279 *((uint8_t *)kd
->kd_addr
+ 4),
280 *((uint8_t *)kd
->kd_addr
+ 6),
281 *((uint8_t *)kd
->kd_addr
+ 8),
282 *((uint8_t *)kd
->kd_addr
+ 10),
283 *((uint8_t *)kd
->kd_addr
+ 12),
284 *((uint8_t *)kd
->kd_addr
+ 14));
291 spl_kmem_init_tracking(struct list_head
*list
, spinlock_t
*lock
, int size
)
295 spin_lock_init(lock
);
296 INIT_LIST_HEAD(list
);
298 for (i
= 0; i
< size
; i
++)
299 INIT_HLIST_HEAD(&kmem_table
[i
]);
305 spl_kmem_fini_tracking(struct list_head
*list
, spinlock_t
*lock
)
311 spin_lock_irqsave(lock
, flags
);
312 if (!list_empty(list
))
313 printk(KERN_WARNING
"%-16s %-5s %-16s %s:%s\n", "address",
314 "size", "data", "func", "line");
316 list_for_each_entry(kd
, list
, kd_list
)
317 printk(KERN_WARNING
"%p %-5d %-16s %s:%d\n", kd
->kd_addr
,
318 (int)kd
->kd_size
, spl_sprintf_addr(kd
, str
, 17, 8),
319 kd
->kd_func
, kd
->kd_line
);
321 spin_unlock_irqrestore(lock
, flags
);
323 #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
324 #define spl_kmem_init_tracking(list, lock, size)
325 #define spl_kmem_fini_tracking(list, lock)
326 #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
/*
 * Fragment of the DEBUG_KMEM module-init path: zero the vmem byte
 * counter and set up the vmem allocation-tracking structures.
 * NOTE(review): the enclosing function's signature is not visible in
 * this chunk — confirm which init hook these statements belong to.
 */
334 vmem_alloc_used_set(0);
335 spl_kmem_init_tracking(&vmem_list
, &vmem_lock
, VMEM_TABLE_SIZE
);
/* Display all unreclaimed memory addresses, including the
 * allocation size and the first few bytes of what's located
 * at that address to aid in debugging. Performance is not
 * a serious concern here since it is module unload time. */
/*
 * NOTE(review): fragment of the DEBUG_KMEM module-fini path — the
 * enclosing function's signature is outside this chunk.  Reports the
 * leaked byte total, then dumps the per-allocation leak list.
 */
349 if (vmem_alloc_used_read() != 0)
350 printk(KERN_WARNING
"vmem leaked %ld/%llu bytes\n",
351 vmem_alloc_used_read(), vmem_alloc_max
);
353 spl_kmem_fini_tracking(&vmem_list
, &vmem_lock
);
354 #endif /* DEBUG_KMEM */