/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <linux/mm.h>
#include <linux/ratelimit.h>

/*
 * As a general rule kmem_alloc() allocations should be small, preferably
 * just a few pages, since they must be physically contiguous.  Therefore,
 * a rate limited warning will be printed to the console for any kmem_alloc()
 * which exceeds a reasonable threshold.
 *
 * The default warning threshold is set to eight pages but capped at 32K to
 * accommodate systems using large pages.  This value was selected to be
 * small enough to ensure the largest allocations are quickly noticed and
 * fixed, but large enough to avoid logging any warnings when an allocation
 * size is larger than optimal but not a serious concern.  Since this value
 * is tunable, developers are encouraged to set it lower when testing so
 * any new largish allocations are quickly caught.  These warnings may be
 * disabled by setting the threshold to zero.
 */
unsigned int spl_kmem_alloc_warn = MIN(8 * PAGE_SIZE, 32 * 1024);
module_param(spl_kmem_alloc_warn, uint, 0644);
MODULE_PARM_DESC(spl_kmem_alloc_warn,
	"Warning threshold in bytes for a kmem_alloc()");
EXPORT_SYMBOL(spl_kmem_alloc_warn);

/*
 * Large kmem_alloc() allocations will fail if they exceed KMALLOC_MAX_SIZE.
 * Allocations which are marginally smaller than this limit may succeed but
 * should still be avoided due to the expense of locating a contiguous range
 * of free pages.  Therefore, a maximum kmem size with a reasonable safety
 * margin of 4x is set.  Kmem_alloc() allocations larger than this maximum
 * will quickly fail.  Vmem_alloc() allocations less than or equal to this
 * value will use kmalloc(), but shift to vmalloc() when exceeding this
 * value.  (An illustrative usage sketch follows these tunables.)
 */
unsigned int spl_kmem_alloc_max = (KMALLOC_MAX_SIZE >> 2);
module_param(spl_kmem_alloc_max, uint, 0644);
MODULE_PARM_DESC(spl_kmem_alloc_max,
	"Maximum size in bytes for a kmem_alloc()");
EXPORT_SYMBOL(spl_kmem_alloc_max);
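
/*
 * Illustrative only: a minimal sketch of how a consumer is expected to pick
 * between the kmem and vmem interfaces relative to the tunables above.  The
 * function name and sizes are hypothetical, and the block is compiled out;
 * it is not part of the SPL itself.
 */
#if 0
static void
spl_kmem_alloc_usage_sketch(void)
{
	void *buf, *big;

	/* Small, physically contiguous allocation: kmem_alloc() is fine. */
	buf = kmem_alloc(512, KM_SLEEP);

	/*
	 * Large buffers should go through vmem_alloc(), which may satisfy
	 * the request with vmalloc() rather than contiguous pages.
	 */
	big = vmem_alloc(256 * 1024, KM_SLEEP);

	vmem_free(big, 256 * 1024);
	kmem_free(buf, 512);
}
#endif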

int
kmem_debugging(void)
{
	return (0);
}
EXPORT_SYMBOL(kmem_debugging);

char *
kmem_vasprintf(const char *fmt, va_list ap)
{
	va_list aq;
	char *ptr;

	do {
		va_copy(aq, ap);
		ptr = kvasprintf(GFP_KERNEL, fmt, aq);
		va_end(aq);
	} while (ptr == NULL);

	return (ptr);
}
EXPORT_SYMBOL(kmem_vasprintf);

char *
kmem_asprintf(const char *fmt, ...)
{
	va_list ap;
	char *ptr;

	do {
		va_start(ap, fmt);
		ptr = kvasprintf(GFP_KERNEL, fmt, ap);
		va_end(ap);
	} while (ptr == NULL);

	return (ptr);
}
EXPORT_SYMBOL(kmem_asprintf);

static char *
__strdup(const char *str, int flags)
{
	char *ptr;
	int n;

	n = strlen(str);
	ptr = kmalloc(n + 1, kmem_flags_convert(flags));
	if (ptr)
		memcpy(ptr, str, n + 1);

	return (ptr);
}

char *
strdup(const char *str)
{
	return (__strdup(str, KM_SLEEP));
}
EXPORT_SYMBOL(strdup);

void
strfree(char *str)
{
	kfree(str);
}
EXPORT_SYMBOL(strfree);

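/*
 * Illustrative only: a minimal sketch of the string helpers above.  A string
 * built with kmem_asprintf() or duplicated with strdup() is released with
 * strfree().  The function and device names are hypothetical; the block is
 * compiled out and is not part of the SPL itself.
 */
#if 0
static void
spl_kmem_string_usage_sketch(void)
{
	char *msg, *copy;

	msg = kmem_asprintf("device %s failed (error %d)", "sda", 5);
	copy = strdup(msg);

	strfree(copy);
	strfree(msg);
}
#endif
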
/*
 * Limit the number of large allocation stack traces dumped to not more than
 * 5 every 60 seconds to prevent denial-of-service attacks from debug code.
 */
DEFINE_RATELIMIT_STATE(kmem_alloc_ratelimit_state, 60 * HZ, 5);

/*
 * General purpose unified implementation of kmem_alloc().  It is an
 * amalgamation of Linux and Illumos allocator design.  It should never be
 * exported to ensure that code using kmem_alloc()/kmem_zalloc() remains
 * relatively portable.  Consumers may only access this function through
 * wrappers that enforce the common flags to ensure portability.
 */
inline void *
spl_kmem_alloc_impl(size_t size, int flags, int node)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	/*
	 * Log abnormally large allocations and rate limit the console output.
	 * Allocations larger than spl_kmem_alloc_warn should be performed
	 * through the vmem_alloc()/vmem_zalloc() interfaces.
	 */
	if ((spl_kmem_alloc_warn > 0) && (size > spl_kmem_alloc_warn) &&
	    !(flags & KM_VMEM) && __ratelimit(&kmem_alloc_ratelimit_state)) {
		printk(KERN_WARNING
		    "Large kmem_alloc(%lu, 0x%x), please file an issue at:\n"
		    "https://github.com/zfsonlinux/zfs/issues/new\n",
		    (unsigned long)size, flags);
		dump_stack();
	}

	/*
	 * Use a loop because kmalloc_node() can fail when GFP_KERNEL is used,
	 * unlike kmem_alloc() with KM_SLEEP on Illumos.
	 */
	do {
		/*
		 * Calling kmalloc_node() when the size >= spl_kmem_alloc_max
		 * is unsafe.  This must fail for all kmem_alloc() and
		 * kmem_zalloc() callers.
		 *
		 * For vmem_alloc() and vmem_zalloc() callers it is permissible
		 * to use __vmalloc().  However, in general use of __vmalloc()
		 * is strongly discouraged because a global lock must be
		 * acquired.  Contention on this lock can significantly
		 * impact performance, so frequently manipulating the virtual
		 * address space should be avoided.
		 */
		if (unlikely(size > spl_kmem_alloc_max)) {
			if (flags & KM_VMEM) {
				ptr = spl_vmalloc(size, lflags, PAGE_KERNEL);
			} else {
				return (NULL);
			}
		} else {
			ptr = kmalloc_node(size, lflags, node);
		}

		if (likely(ptr) || (flags & KM_NOSLEEP))
			return (ptr);

		if (unlikely(__ratelimit(&kmem_alloc_ratelimit_state))) {
			printk(KERN_WARNING
			    "Possible memory allocation deadlock: "
			    "size=%lu lflags=0x%x",
			    (unsigned long)size, lflags);
			dump_stack();
		}

		/*
		 * Use cond_resched() instead of congestion_wait() to avoid
		 * deadlocking systems where there are no block devices.
		 */
		cond_resched();
	} while (1);

	return (NULL);
}

inline void
spl_kmem_free_impl(const void *buf, size_t size)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
}

/*
 * Memory allocation and accounting for kmem_* style allocations.  When
 * DEBUG_KMEM is enabled the total memory allocated will be tracked and
 * any memory leaked will be reported during module unload.
 *
 * ./configure --enable-debug-kmem
 */
#ifdef DEBUG_KMEM

/* Shim layer memory accounting */
#ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
#else /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
#endif /* HAVE_ATOMIC64_T */

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);

inline void *
spl_kmem_alloc_debug(size_t size, int flags, int node)
{
	void *ptr;

	ptr = spl_kmem_alloc_impl(size, flags, node);
	if (ptr) {
		kmem_alloc_used_add(size);
		if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
			kmem_alloc_max = kmem_alloc_used_read();
	}

	return (ptr);
}

inline void
spl_kmem_free_debug(const void *ptr, size_t size)
{
	kmem_alloc_used_sub(size);
	spl_kmem_free_impl(ptr, size);
}

/*
 * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance, but it makes finding memory leaks straightforward.
 *
 * Not surprisingly, with debugging enabled the xmem_locks are very highly
 * contended, particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 *
 * ./configure --enable-debug-kmem-tracking
 */
#ifdef DEBUG_KMEM_TRACKING

#include <linux/hash.h>
#include <linux/ctype.h>

#define	KMEM_HASH_BITS		10
#define	KMEM_TABLE_SIZE		(1 << KMEM_HASH_BITS)

typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

static spinlock_t kmem_lock;
static struct hlist_head kmem_table[KMEM_TABLE_SIZE];
static struct list_head kmem_list;

static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table,
    int bits, const void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kmem_debug *p;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	head = &table[hash_ptr((void *)addr, bits)];
	hlist_for_each(node, head) {
		p = list_entry(node, struct kmem_debug, kd_hlist);
		if (p->kd_addr == addr) {
			hlist_del_init(&p->kd_hlist);
			list_del_init(&p->kd_list);
			spin_unlock_irqrestore(lock, flags);
			return (p);
		}
	}

	spin_unlock_irqrestore(lock, flags);

	return (NULL);
}

inline void *
spl_kmem_alloc_track(size_t size, int flags,
    const char *func, int line, int node)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;

	dptr = kmalloc(sizeof (kmem_debug_t), kmem_flags_convert(flags));
	if (dptr == NULL)
		return (NULL);

	dptr->kd_func = __strdup(func, flags);
	if (dptr->kd_func == NULL) {
		kfree(dptr);
		return (NULL);
	}

	ptr = spl_kmem_alloc_debug(size, flags, node);
	if (ptr == NULL) {
		kfree(dptr->kd_func);
		kfree(dptr);
		return (NULL);
	}

	INIT_HLIST_NODE(&dptr->kd_hlist);
	INIT_LIST_HEAD(&dptr->kd_list);

	dptr->kd_addr = ptr;
	dptr->kd_size = size;
	dptr->kd_line = line;

	spin_lock_irqsave(&kmem_lock, irq_flags);
	hlist_add_head(&dptr->kd_hlist,
	    &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
	list_add_tail(&dptr->kd_list, &kmem_list);
	spin_unlock_irqrestore(&kmem_lock, irq_flags);

	return (ptr);
}

inline void
spl_kmem_free_track(const void *ptr, size_t size)
{
	kmem_debug_t *dptr;

	/* Must exist in hash due to kmem_alloc() */
	dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
	ASSERT3P(dptr, !=, NULL);
	ASSERT3S(dptr->kd_size, ==, size);

	kfree(dptr->kd_func);
	kfree(dptr);

	spl_kmem_free_debug(ptr, size);
}
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

/*
 * Public kmem_alloc(), kmem_zalloc() and kmem_free() interfaces.
 */
void *
spl_kmem_alloc(size_t size, int flags, const char *func, int line)
{
	ASSERT0(flags & ~KM_PUBLIC_MASK);

#if !defined(DEBUG_KMEM)
	return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE));
#elif !defined(DEBUG_KMEM_TRACKING)
	return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE));
#else
	return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE));
#endif
}
EXPORT_SYMBOL(spl_kmem_alloc);

void *
spl_kmem_zalloc(size_t size, int flags, const char *func, int line)
{
	ASSERT0(flags & ~KM_PUBLIC_MASK);

	flags |= KM_ZERO;

#if !defined(DEBUG_KMEM)
	return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE));
#elif !defined(DEBUG_KMEM_TRACKING)
	return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE));
#else
	return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE));
#endif
}
EXPORT_SYMBOL(spl_kmem_zalloc);

void
spl_kmem_free(const void *buf, size_t size)
{
#if !defined(DEBUG_KMEM)
	return (spl_kmem_free_impl(buf, size));
#elif !defined(DEBUG_KMEM_TRACKING)
	return (spl_kmem_free_debug(buf, size));
#else
	return (spl_kmem_free_track(buf, size));
#endif
}
EXPORT_SYMBOL(spl_kmem_free);

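/*
 * Illustrative only: a minimal sketch of how consumers reach the public
 * interfaces through the kmem_zalloc()/kmem_free() macros in <sys/kmem.h>,
 * which pass the caller's function name and line number to the wrappers
 * above.  The structure and function names are hypothetical; the block is
 * compiled out and is not part of the SPL itself.
 */
#if 0
typedef struct sketch_ctx {
	int	sc_refcount;
	char	*sc_name;
} sketch_ctx_t;

static sketch_ctx_t *
sketch_ctx_create(const char *name)
{
	/* Zeroed, sleeping allocation; expands to spl_kmem_zalloc(). */
	sketch_ctx_t *ctx = kmem_zalloc(sizeof (sketch_ctx_t), KM_SLEEP);

	ctx->sc_name = strdup(name);
	return (ctx);
}

static void
sketch_ctx_destroy(sketch_ctx_t *ctx)
{
	strfree(ctx->sc_name);
	/* The size must match the original allocation. */
	kmem_free(ctx, sizeof (sketch_ctx_t));
}
#endif
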
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/*
	 * Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer.
	 */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/*
			 * Minimum number of printable characters found
			 * to make it worthwhile to print this as ASCII.
			 */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		    *((uint8_t *)kd->kd_addr),
		    *((uint8_t *)kd->kd_addr + 2),
		    *((uint8_t *)kd->kd_addr + 4),
		    *((uint8_t *)kd->kd_addr + 6),
		    *((uint8_t *)kd->kd_addr + 8),
		    *((uint8_t *)kd->kd_addr + 10),
		    *((uint8_t *)kd->kd_addr + 12),
		    *((uint8_t *)kd->kd_addr + 14));
	}

	return (str);
}

static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);

	return (0);
}

static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
}
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */

int
spl_kmem_init(void)
{
#ifdef DEBUG_KMEM
	kmem_alloc_used_set(0);

#ifdef DEBUG_KMEM_TRACKING
	spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

	return (0);
}

void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
	/*
	 * Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time.
	 */
	if (kmem_alloc_used_read() != 0)
		printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n",
		    (unsigned long)kmem_alloc_used_read(), kmem_alloc_max);

#ifdef DEBUG_KMEM_TRACKING
	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
}