/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/vmem.h>

/*
 * As a general rule kmem_alloc() allocations should be small, preferably
 * just a few pages, since they must be physically contiguous.  Therefore, a
 * rate limited warning will be printed to the console for any kmem_alloc()
 * which exceeds a reasonable threshold.
 *
 * The default warning threshold is set to sixteen pages but capped at 64K to
 * accommodate systems using large pages.  This value was selected to be small
 * enough to ensure the largest allocations are quickly noticed and fixed,
 * but large enough to avoid logging any warnings when an allocation size is
 * larger than optimal but not a serious concern.  Since this value is tunable,
 * developers are encouraged to set it lower when testing so any new largish
 * allocations are quickly caught.  These warnings may be disabled by setting
 * the threshold to zero.
 */
unsigned int spl_kmem_alloc_warn = MIN(16 * PAGE_SIZE, 64 * 1024);
module_param(spl_kmem_alloc_warn, uint, 0644);
MODULE_PARM_DESC(spl_kmem_alloc_warn,
    "Warning threshold in bytes for a kmem_alloc()");
EXPORT_SYMBOL(spl_kmem_alloc_warn);

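/*
 * Illustrative sketch of how the tunable is expected to be adjusted at
 * runtime, assuming the module is loaded under the name "spl" so the
 * parameter appears in sysfs; for example, to disable the warnings:
 *
 *	echo 0 > /sys/module/spl/parameters/spl_kmem_alloc_warn
 */
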
/*
 * Large kmem_alloc() allocations will fail if they exceed KMALLOC_MAX_SIZE.
 * Allocations which are marginally smaller than this limit may succeed but
 * should still be avoided due to the expense of locating a contiguous range
 * of free pages.  Therefore, a maximum kmem size with a reasonable safety
 * margin of 4x is set.  Kmem_alloc() allocations larger than this maximum
 * will quickly fail.  Vmem_alloc() allocations less than or equal to this
 * value will use kmalloc(), but shift to vmalloc() when exceeding this value.
 */
unsigned int spl_kmem_alloc_max = (KMALLOC_MAX_SIZE >> 2);
module_param(spl_kmem_alloc_max, uint, 0644);
MODULE_PARM_DESC(spl_kmem_alloc_max,
    "Maximum size in bytes for a kmem_alloc()");
EXPORT_SYMBOL(spl_kmem_alloc_max);

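/*
 * For a sense of scale (illustrative only, since KMALLOC_MAX_SIZE depends
 * on the architecture and kernel configuration): with a 4M KMALLOC_MAX_SIZE
 * the 4x safety margin caps kmem_alloc() at 1M, i.e.
 *
 *	spl_kmem_alloc_max = KMALLOC_MAX_SIZE >> 2 = 4194304 >> 2 = 1048576
 *
 * Anything larger should be requested through vmem_alloc()/vmem_zalloc().
 */
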
int
kmem_debugging(void)
{
        return (0);
}
EXPORT_SYMBOL(kmem_debugging);

char *
kmem_vasprintf(const char *fmt, va_list ap)
{
        va_list aq;
        char *ptr;

        do {
                va_copy(aq, ap);
                ptr = kvasprintf(kmem_flags_convert(KM_SLEEP), fmt, aq);
                va_end(aq);
        } while (ptr == NULL);

        return (ptr);
}
EXPORT_SYMBOL(kmem_vasprintf);

char *
kmem_asprintf(const char *fmt, ...)
{
        va_list ap;
        char *ptr;

        do {
                va_start(ap, fmt);
                ptr = kvasprintf(kmem_flags_convert(KM_SLEEP), fmt, ap);
                va_end(ap);
        } while (ptr == NULL);

        return (ptr);
}
EXPORT_SYMBOL(kmem_asprintf);

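/*
 * Illustrative usage sketch (the name format is hypothetical): the returned
 * string is heap allocated and must be released with strfree() once it is
 * no longer needed.
 *
 *	char *name = kmem_asprintf("snapshot-%u", id);
 *	...
 *	strfree(name);
 */
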
static char *
__strdup(const char *str, int flags)
{
        char *ptr;
        int n;

        n = strlen(str);
        ptr = kmalloc(n + 1, kmem_flags_convert(flags));
        if (ptr)
                memcpy(ptr, str, n + 1);

        return (ptr);
}

char *
strdup(const char *str)
{
        return (__strdup(str, KM_SLEEP));
}
EXPORT_SYMBOL(strdup);

void
strfree(char *str)
{
        kfree(str);
}
EXPORT_SYMBOL(strfree);

/*
 * General purpose unified implementation of kmem_alloc().  It is an
 * amalgamation of Linux and Illumos allocator design.  It should never be
 * exported to ensure that code using kmem_alloc()/kmem_zalloc() remains
 * relatively portable.  Consumers may only access this function through
 * wrappers that enforce the common flags to ensure portability.
 */
inline void *
spl_kmem_alloc_impl(size_t size, int flags, int node)
{
        gfp_t lflags = kmem_flags_convert(flags);
        int use_vmem = 0;
        void *ptr;

        /*
         * Log abnormally large allocations and rate limit the console output.
         * Allocations larger than spl_kmem_alloc_warn should be performed
         * through the vmem_alloc()/vmem_zalloc() interfaces.
         */
        if ((spl_kmem_alloc_warn > 0) && (size > spl_kmem_alloc_warn) &&
            !(flags & KM_VMEM)) {
                printk(KERN_WARNING
                    "Large kmem_alloc(%lu, 0x%x), please file an issue at:\n"
                    "https://github.com/zfsonlinux/zfs/issues/new\n",
                    (unsigned long)size, flags);
                dump_stack();
        }

        /*
         * Use a loop because kmalloc_node() can fail when GFP_KERNEL is used,
         * unlike kmem_alloc() with KM_SLEEP on Illumos.
         */
        do {
                /*
                 * Calling kmalloc_node() when the size >= spl_kmem_alloc_max
                 * is unsafe.  This must fail for all kmem_alloc() and
                 * kmem_zalloc() callers.
                 *
                 * For vmem_alloc() and vmem_zalloc() callers it is permissible
                 * to use __vmalloc().  However, in general use of __vmalloc()
                 * is strongly discouraged because a global lock must be
                 * acquired.  Contention on this lock can significantly
                 * impact performance so frequently manipulating the virtual
                 * address space is strongly discouraged.
                 */
                if ((size > spl_kmem_alloc_max) || use_vmem) {
                        if (flags & KM_VMEM) {
                                ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
                                    PAGE_KERNEL);
                        } else {
                                return (NULL);
                        }
                } else {
                        ptr = kmalloc_node(size, lflags, node);
                }

                if (likely(ptr) || (flags & KM_NOSLEEP))
                        return (ptr);

                /*
                 * For vmem_alloc() and vmem_zalloc() callers retry immediately
                 * using __vmalloc() which is unlikely to fail.
                 */
                if ((flags & KM_VMEM) && (use_vmem == 0)) {
                        use_vmem = 1;
                        continue;
                }

                /*
                 * Use cond_resched() instead of congestion_wait() to avoid
                 * deadlocking systems where there are no block devices.
                 */
                cond_resched();
        } while (1);

        return (NULL);
}

inline void
spl_kmem_free_impl(const void *buf, size_t size)
{
        if (is_vmalloc_addr(buf))
                vfree(buf);
        else
                kfree(buf);
}

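/*
 * Callers never need to remember whether a buffer came from kmalloc() or
 * __vmalloc(); is_vmalloc_addr() selects the matching release path.  A
 * minimal caller sketch (the size passed at free time must match the size
 * used at allocation time):
 *
 *	void *buf = vmem_alloc(128 * 1024, KM_SLEEP);
 *	...
 *	vmem_free(buf, 128 * 1024);
 */
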
/*
 * Memory allocation and accounting for kmem_* style allocations.  When
 * DEBUG_KMEM is enabled the total memory allocated will be tracked and
 * any memory leaked will be reported during module unload.
 *
 * ./configure --enable-debug-kmem
 */
#ifdef DEBUG_KMEM

/* Shim layer memory accounting */
#ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
#else /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
#endif /* HAVE_ATOMIC64_T */

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);

static inline void *
spl_kmem_alloc_debug(size_t size, int flags, int node)
{
        void *ptr;

        ptr = spl_kmem_alloc_impl(size, flags, node);
        if (ptr) {
                kmem_alloc_used_add(size);
                if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
                        kmem_alloc_max = kmem_alloc_used_read();
        }

        return (ptr);
}

static inline void
spl_kmem_free_debug(const void *ptr, size_t size)
{
        kmem_alloc_used_sub(size);
        spl_kmem_free_impl(ptr, size);
}

/*
 * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straightforward.
 *
 * Not surprisingly with debugging enabled the xmem_locks are very highly
 * contended particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 *
 * ./configure --enable-debug-kmem-tracking
 */
#ifdef DEBUG_KMEM_TRACKING

#include <linux/hash.h>
#include <linux/ctype.h>

#define KMEM_HASH_BITS          10
#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)

typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;

static spinlock_t kmem_lock;
static struct hlist_head kmem_table[KMEM_TABLE_SIZE];
static struct list_head kmem_list;

static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table,
    int bits, const void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);

        head = &table[hash_ptr((void *)addr, bits)];
        hlist_for_each(node, head) {
                p = list_entry(node, struct kmem_debug, kd_hlist);
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        return (p);
                }
        }

        spin_unlock_irqrestore(lock, flags);

        return (NULL);
}

inline void *
spl_kmem_alloc_track(size_t size, int flags,
    const char *func, int line, int node)
{
        void *ptr = NULL;
        kmem_debug_t *dptr;
        unsigned long irq_flags;

        dptr = kmalloc(sizeof (kmem_debug_t), kmem_flags_convert(flags));
        if (dptr == NULL)
                return (NULL);

        dptr->kd_func = __strdup(func, flags);
        if (dptr->kd_func == NULL) {
                kfree(dptr);
                return (NULL);
        }

        ptr = spl_kmem_alloc_debug(size, flags, node);
        if (ptr == NULL) {
                kfree(dptr->kd_func);
                kfree(dptr);
                return (NULL);
        }

        INIT_HLIST_NODE(&dptr->kd_hlist);
        INIT_LIST_HEAD(&dptr->kd_list);

        dptr->kd_addr = ptr;
        dptr->kd_size = size;
        dptr->kd_line = line;

        spin_lock_irqsave(&kmem_lock, irq_flags);
        hlist_add_head(&dptr->kd_hlist,
            &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
        list_add_tail(&dptr->kd_list, &kmem_list);
        spin_unlock_irqrestore(&kmem_lock, irq_flags);

        return (ptr);
}

inline void
spl_kmem_free_track(const void *ptr, size_t size)
{
        kmem_debug_t *dptr;

        /* Ignore NULL pointer since we haven't tracked it at all */
        if (ptr == NULL)
                return;

        /* Must exist in hash due to kmem_alloc() */
        dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
        ASSERT3P(dptr, !=, NULL);
        ASSERT3S(dptr->kd_size, ==, size);

        kfree(dptr->kd_func);
        kfree(dptr);

        spl_kmem_free_debug(ptr, size);
}

#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

/*
 * Public kmem_alloc(), kmem_zalloc() and kmem_free() interfaces.
 */
void *
spl_kmem_alloc(size_t size, int flags, const char *func, int line)
{
        ASSERT0(flags & ~KM_PUBLIC_MASK);

#if !defined(DEBUG_KMEM)
        return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE));
#elif !defined(DEBUG_KMEM_TRACKING)
        return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE));
#else
        return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE));
#endif
}
EXPORT_SYMBOL(spl_kmem_alloc);

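/*
 * A sketch of how the public kmem_alloc() wrapper is expected to expand
 * (the actual macro lives in the SPL headers; this is illustrative only),
 * which is how the allocating function and line reach the tracking code:
 *
 *	#define kmem_alloc(sz, fl) \
 *	    spl_kmem_alloc((sz), (fl), __func__, __LINE__)
 */
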
void *
spl_kmem_zalloc(size_t size, int flags, const char *func, int line)
{
        ASSERT0(flags & ~KM_PUBLIC_MASK);

        flags |= KM_ZERO;

#if !defined(DEBUG_KMEM)
        return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE));
#elif !defined(DEBUG_KMEM_TRACKING)
        return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE));
#else
        return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE));
#endif
}
EXPORT_SYMBOL(spl_kmem_zalloc);

void
spl_kmem_free(const void *buf, size_t size)
{
#if !defined(DEBUG_KMEM)
        return (spl_kmem_free_impl(buf, size));
#elif !defined(DEBUG_KMEM_TRACKING)
        return (spl_kmem_free_debug(buf, size));
#else
        return (spl_kmem_free_track(buf, size));
#endif
}
EXPORT_SYMBOL(spl_kmem_free);

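/*
 * Illustrative round trip through the public interfaces ("mytype_t" is a
 * hypothetical consumer type); kmem_free() must be passed the allocation
 * size so the DEBUG_KMEM accounting stays balanced:
 *
 *	mytype_t *tp = kmem_zalloc(sizeof (mytype_t), KM_SLEEP);
 *	...
 *	kmem_free(tp, sizeof (mytype_t));
 */
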
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
        int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
        int i, flag = 1;

        ASSERT(str != NULL && len >= 17);
        memset(str, 0, len);

        /*
         * Check for a fully printable string, and while we are at
         * it place the printable characters in the passed buffer.
         */
        for (i = 0; i < size; i++) {
                str[i] = ((char *)(kd->kd_addr))[i];
                if (isprint(str[i])) {
                        continue;
                } else {
                        /*
                         * Minimum number of printable characters found
                         * to make it worthwhile to print this as ascii.
                         */
                        if (i > min)
                                break;

                        flag = 0;
                        break;
                }
        }

        if (!flag) {
                sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
                    *((uint8_t *)kd->kd_addr),
                    *((uint8_t *)kd->kd_addr + 2),
                    *((uint8_t *)kd->kd_addr + 4),
                    *((uint8_t *)kd->kd_addr + 6),
                    *((uint8_t *)kd->kd_addr + 8),
                    *((uint8_t *)kd->kd_addr + 10),
                    *((uint8_t *)kd->kd_addr + 12),
                    *((uint8_t *)kd->kd_addr + 14));
        }

        return (str);
}

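/*
 * Illustrative output: a leaked buffer holding mostly printable text is
 * reported with its ascii prefix in the "data" column, while a binary
 * buffer is rendered as eight hex pairs sampled from every other one of
 * its first sixteen bytes.
 */
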
static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
        int i;

        spin_lock_init(lock);
        INIT_LIST_HEAD(list);

        for (i = 0; i < size; i++)
                INIT_HLIST_HEAD(&kmem_table[i]);

        return (0);
}

static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
        unsigned long flags;
        kmem_debug_t *kd;
        char str[17];

        spin_lock_irqsave(lock, flags);
        if (!list_empty(list))
                printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
                    "size", "data", "func", "line");

        list_for_each_entry(kd, list, kd_list) {
                printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
                    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
                    kd->kd_func, kd->kd_line);
        }

        spin_unlock_irqrestore(lock, flags);
}
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */

int
spl_kmem_init(void)
{
#ifdef DEBUG_KMEM
        kmem_alloc_used_set(0);

#ifdef DEBUG_KMEM_TRACKING
        spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

        return (0);
}

void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
        /*
         * Display all unreclaimed memory addresses, including the
         * allocation size and the first few bytes of what's located
         * at that address to aid in debugging.  Performance is not
         * a serious concern here since it is module unload time.
         */
        if (kmem_alloc_used_read() != 0)
                printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n",
                    (unsigned long)kmem_alloc_used_read(), kmem_alloc_max);

#ifdef DEBUG_KMEM_TRACKING
        spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
}