]> git.proxmox.com Git - mirror_spl.git/blame - modules/spl/spl-debug.c
Update SPL to use new debug infrastructure. This means:
[mirror_spl.git] / modules / spl / spl-debug.c
CommitLineData
57d1b188 1/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
3 *
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Zach Brown <zab@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
7 * Author: Brian Behlendorf <behlendorf1@llnl.gov>
8 *
9 * This file was originally part of Lustre, http://www.lustre.org.
10 * but has subsequently been adapted for use in the SPL in
11 * accordance with the GPL.
12 *
13 * SPL is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
16 *
17 * SPL is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with SPL; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <linux/kmod.h>
28#include <linux/mm.h>
29#include <linux/vmalloc.h>
30#include <linux/pagemap.h>
31#include <linux/slab.h>
32#include <linux/ctype.h>
33#include <linux/kthread.h>
34#include <linux/hardirq.h>
35#include <linux/interrupt.h>
36#include <sys/sysmacros.h>
37#include <sys/proc.h>
38#include <sys/debug.h>
39#include <spl-ctl.h>
40#include "config.h"
41
/* This file logs through its own subsystem tag; clear any tag inherited
 * from an included header before defining ours. */
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_DEBUG
48unsigned long spl_debug_subsys = ~0;
49EXPORT_SYMBOL(spl_debug_subsys);
50module_param(spl_debug_subsys, long, 0644);
51MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
52
53unsigned long spl_debug_mask = (D_EMERG | D_ERROR | D_WARNING | D_CONSOLE);
54EXPORT_SYMBOL(spl_debug_mask);
55module_param(spl_debug_mask, long, 0644);
56MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
57
58unsigned long spl_debug_printk = D_CANTMASK;
59EXPORT_SYMBOL(spl_debug_printk);
60module_param(spl_debug_printk, long, 0644);
61MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
62
/* Total size in megabytes of all trace buffers; -1 selects a default in
 * debug_init(). */
int spl_debug_mb = -1;
EXPORT_SYMBOL(spl_debug_mb);
module_param(spl_debug_mb, int, 0644);
MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");

/* Non-zero when records are stored with a binary spl_debug_header. */
unsigned int spl_debug_binary = 1;
EXPORT_SYMBOL(spl_debug_binary);

/* Set once by spl_debug_bug() when an SBUG has been hit. */
unsigned int spl_debug_catastrophe;
EXPORT_SYMBOL(spl_debug_catastrophe);

/* Non-zero (default) to panic() when spl_debug_bug() fires. */
unsigned int spl_debug_panic_on_bug = 1;
EXPORT_SYMBOL(spl_debug_panic_on_bug);
module_param(spl_debug_panic_on_bug, int, 0644);
MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");

/* Fully expanded dump file name: spl_debug_file_path plus a timestamp and
 * pid, filled in by spl_debug_dumplog_internal(). */
static char spl_debug_file_name[PATH_MAX];
char spl_debug_file_path[PATH_MAX] = "/var/dumps/spl-log";

/* Non-zero to rate-limit repeated console messages. */
unsigned int spl_console_ratelimit = 1;
EXPORT_SYMBOL(spl_console_ratelimit);

/* Console rate-limit delay bounds in jiffies; set in debug_init(). */
long spl_console_max_delay;
EXPORT_SYMBOL(spl_console_max_delay);

long spl_console_min_delay;
EXPORT_SYMBOL(spl_console_min_delay);

/* Multiplier applied to the rate-limit delay on each suppressed burst. */
unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
EXPORT_SYMBOL(spl_console_backoff);

unsigned int spl_debug_stack;
EXPORT_SYMBOL(spl_debug_stack);

/* Non-zero while a panic dump is running; switches page collection to the
 * single-cpu panic path and disables putting pages back. */
static int spl_panic_in_progress;

/* Per-context (process/softirq/irq), per-cpu trace buffers, plus one
 * console scratch buffer per context per cpu. */
union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
char *trace_console_buffers[NR_CPUS][3];
struct rw_semaphore trace_sem;
atomic_t trace_tage_allocated = ATOMIC_INIT(0);

static int panic_notifier(struct notifier_block *, unsigned long, void *);
static int spl_debug_dump_all_pages(char *);
static void trace_fini(void);


/* Memory percentage breakdown by type */
static unsigned int pages_factor[TCD_TYPE_MAX] = {
        80, /* 80% pages for TCD_TYPE_PROC */
        10, /* 10% pages for TCD_TYPE_SOFTIRQ */
        10  /* 10% pages for TCD_TYPE_IRQ */
};
115
116static struct notifier_block spl_panic_notifier = {
117 notifier_call: panic_notifier,
118 next: NULL,
119 priority: 10000
120};
121
122const char *
123spl_debug_subsys2str(int subsys)
124{
125 switch (subsys) {
126 default:
127 return NULL;
128 case S_UNDEFINED:
129 return "undefined";
130 case S_ATOMIC:
131 return "atomic";
132 case S_KOBJ:
133 return "kobj";
134 case S_VNODE:
135 return "vnode";
136 case S_TIME:
137 return "time";
138 case S_RWLOCK:
139 return "rwlock";
140 case S_THREAD:
141 return "thread";
142 case S_CONDVAR:
143 return "condvar";
144 case S_MUTEX:
145 return "mutex";
146 case S_RNG:
147 return "rng";
148 case S_TASKQ:
149 return "taskq";
150 case S_KMEM:
151 return "kmem";
152 }
153}
154
155const char *
156spl_debug_dbg2str(int debug)
157{
158 switch (debug) {
159 default:
160 return NULL;
161 case D_TRACE:
162 return "trace";
163 case D_INFO:
164 return "info";
165 case D_WARNING:
166 return "warning";
167 case D_ERROR:
168 return "error";
169 case D_EMERG:
170 return "emerg";
171 case D_CONSOLE:
172 return "console";
173 case D_IOCTL:
174 return "ioctl";
175 case D_DPRINTF:
176 return "dprintf";
177 case D_OTHER:
178 return "other";
179 }
180}
181
182int
183spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
184{
185 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
186 spl_debug_dbg2str;
187 const char *token;
188 int i, bit, len = 0;
189
190 if (mask == 0) { /* "0" */
191 if (size > 0)
192 str[0] = '0';
193 len = 1;
194 } else { /* space-separated tokens */
195 for (i = 0; i < 32; i++) {
196 bit = 1 << i;
197
198 if ((mask & bit) == 0)
199 continue;
200
201 token = fn(bit);
202 if (token == NULL) /* unused bit */
203 continue;
204
205 if (len > 0) { /* separator? */
206 if (len < size)
207 str[len] = ' ';
208 len++;
209 }
210
211 while (*token != 0) {
212 if (len < size)
213 str[len] = *token;
214 token++;
215 len++;
216 }
217 }
218 }
219
220 /* terminate 'str' */
221 if (len < size)
222 str[len] = 0;
223 else
224 str[size - 1] = 0;
225
226 return len;
227}
228
229static int
230spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
231{
232 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
233 spl_debug_dbg2str;
234 const char *token;
235 int i, j, bit;
236
237 /* match against known tokens */
238 for (i = 0; i < 32; i++) {
239 bit = 1 << i;
240
241 token = fn(bit);
242 if (token == NULL) /* unused? */
243 continue;
244
245 /* strcasecmp */
246 for (j = 0; ; j++) {
247 if (j == len) { /* end of token */
248 if (token[j] == 0) {
249 *mask = bit;
250 return 0;
251 }
252 break;
253 }
254
255 if (token[j] == 0)
256 break;
257
258 if (str[j] == token[j])
259 continue;
260
261 if (str[j] < 'A' || 'Z' < str[j])
262 break;
263
264 if (str[j] - 'A' + 'a' != token[j])
265 break;
266 }
267 }
268
269 return -EINVAL; /* no match */
270}
271
/*
 * Parse a user-supplied mask string into a bitmask.  Accepts either a
 * single number (any base %i understands) or a whitespace-separated list
 * of token names, optionally prefixed with '+'/'-' operators for relative
 * updates.  Returns 0 on success, -EINVAL on malformed input.
 */
int
spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
{
        char op = 0;
        int m = 0, matched, n, t;

        /* Allow a number for backwards compatibility */
        for (n = strlen(str); n > 0; n--)
                if (!isspace(str[n-1]))
                        break;
        matched = n;

        /* %n records how many characters the number consumed; accept the
         * numeric form only if it spans the whole (right-trimmed) string. */
        if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
                *mask = m;
                return 0;
        }

        /* <str> must be a list of debug tokens or numbers separated by
         * whitespace and optionally an operator ('+' or '-'). If an operator
         * appears first in <str>, '*mask' is used as the starting point
         * (relative), otherwise 0 is used (absolute). An operator applies to
         * all following tokens up to the next operator. */
        matched = 0;
        while (*str != 0) {
                while (isspace(*str)) /* skip whitespace */
                        str++;

                if (*str == 0)
                        break;

                if (*str == '+' || *str == '-') {
                        op = *str++;

                        /* op on first token == relative */
                        if (!matched)
                                m = *mask;

                        while (isspace(*str)) /* skip whitespace */
                                str++;

                        if (*str == 0) /* trailing op */
                                return -EINVAL;
                }

                /* find token length */
                for (n = 0; str[n] != 0 && !isspace(str[n]); n++);

                /* match token */
                if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
                        return -EINVAL;

                matched = 1;
                if (op == '-')
                        m &= ~t;
                else
                        m |= t;

                str += n;
        }

        if (!matched)
                return -EINVAL;

        *mask = m;
        return 0;
}
338
/* State handed to the log-dump thread: the requesting task's pid (used to
 * name the dump file), a completion flag, and the wait queue the
 * requester sleeps on until the dump finishes. */
typedef struct dumplog_priv {
        wait_queue_head_t dp_waitq;
        pid_t dp_pid;
        atomic_t dp_flag;
} dumplog_priv_t;
344
345static void
346spl_debug_dumplog_internal(dumplog_priv_t *dp)
347{
348 void *journal_info;
349
350 journal_info = current->journal_info;
351 current->journal_info = NULL;
352
353 snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
354 "%s.%ld.%ld", spl_debug_file_path,
355 get_seconds(), (long)dp->dp_pid);
356 printk(KERN_ALERT "SPL: dumping log to %s\n", spl_debug_file_name);
357 spl_debug_dump_all_pages(spl_debug_file_name);
358
359 current->journal_info = journal_info;
360}
361
362static int
363spl_debug_dumplog_thread(void *arg)
364{
365 dumplog_priv_t *dp = (dumplog_priv_t *)arg;
366
367 spl_debug_dumplog_internal(dp);
368 atomic_set(&dp->dp_flag, 1);
369 wake_up(&dp->dp_waitq);
370 do_exit(0);
371
372 return 0; /* Unreachable */
373}
374
375int
376spl_debug_dumplog(void)
377{
378 struct task_struct *tsk;
379 dumplog_priv_t dp;
380 ENTRY;
381
382 init_waitqueue_head(&dp.dp_waitq);
383 dp.dp_pid = current->pid;
384 atomic_set(&dp.dp_flag, 0);
385
386 tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
387 if (tsk == NULL)
388 RETURN(-ENOMEM);
389
390 wake_up_process(tsk);
391 wait_event(dp.dp_waitq, atomic_read(&dp.dp_flag));
392
393 RETURN(0);
394}
395EXPORT_SYMBOL(spl_debug_dumplog);
396
397static char *
398trace_get_console_buffer(void)
399{
400 int cpu = get_cpu();
401 int idx;
402
403 if (in_irq()) {
404 idx = 0;
405 } else if (in_softirq()) {
406 idx = 1;
407 } else {
408 idx = 2;
409 }
410
411 return trace_console_buffers[cpu][idx];
412}
413
/* Release the buffer obtained from trace_get_console_buffer() by
 * re-enabling preemption; the buffer itself needs no cleanup. */
static void
trace_put_console_buffer(char *buffer)
{
        put_cpu();
}
419
420static struct trace_cpu_data *
421trace_get_tcd(void)
422{
423 int cpu;
424
425 cpu = get_cpu();
426 if (in_irq())
427 return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
428 else if (in_softirq())
429 return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
430
431 return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
432}
433
/* Release the descriptor obtained from trace_get_tcd() by re-enabling
 * preemption. */
static void
trace_put_tcd (struct trace_cpu_data *tcd)
{
        put_cpu();
}
439
440static int
441trace_lock_tcd(struct trace_cpu_data *tcd)
442{
443 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
444
445 if (tcd->tcd_type == TCD_TYPE_IRQ)
446 local_irq_disable();
447 else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
448 local_bh_disable();
449
450 return 1;
451}
452
453static void
454trace_unlock_tcd(struct trace_cpu_data *tcd)
455{
456 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
457
458 if (tcd->tcd_type == TCD_TYPE_IRQ)
459 local_irq_enable();
460 else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
461 local_bh_enable();
462}
463
464static void
465trace_set_debug_header(struct spl_debug_header *header, int subsys,
466 int mask, const int line, unsigned long stack)
467{
468 struct timeval tv;
469
470 do_gettimeofday(&tv);
471
472 header->ph_subsys = subsys;
473 header->ph_mask = mask;
474 header->ph_cpu_id = smp_processor_id();
475 header->ph_sec = (__u32)tv.tv_sec;
476 header->ph_usec = tv.tv_usec;
477 header->ph_stack = stack;
478 header->ph_pid = current->pid;
479 header->ph_line_num = line;
480
481 return;
482}
483
484static void
485trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
486 int len, const char *file, const char *fn)
487{
488 char *prefix = "SPL", *ptype = NULL;
489
490 if ((mask & D_EMERG) != 0) {
491 prefix = "SPLError";
492 ptype = KERN_EMERG;
493 } else if ((mask & D_ERROR) != 0) {
494 prefix = "SPLError";
495 ptype = KERN_ERR;
496 } else if ((mask & D_WARNING) != 0) {
497 prefix = "SPL";
498 ptype = KERN_WARNING;
499 } else if ((mask & (D_CONSOLE | spl_debug_printk)) != 0) {
500 prefix = "SPL";
501 ptype = KERN_INFO;
502 }
503
504 if ((mask & D_CONSOLE) != 0) {
505 printk("%s%s: %.*s", ptype, prefix, len, buf);
506 } else {
507 printk("%s%s: %d:(%s:%d:%s()) %.*s", ptype, prefix, hdr->ph_pid,
508 file, hdr->ph_line_num, fn, len, buf);
509 }
510
511 return;
512}
513
514static int
515trace_max_debug_mb(void)
516{
517 return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
518}
519
/* Run 'fn' once on every online cpu by temporarily pinning the calling
 * task to each cpu in turn, restoring the original affinity afterwards.
 * NOTE(review): set_cpus_allowed() with a saved cpumask is the old-kernel
 * idiom and does not exclude cpu hotplug during the walk — confirm that
 * is acceptable for the callers (log collection/cleanup). */
static void
trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
{
        cpumask_t mask, cpus_allowed = current->cpus_allowed;
        int cpu;

        for_each_online_cpu(cpu) {
                cpus_clear(mask);
                cpu_set(cpu, mask);
                set_cpus_allowed(current, mask);

                fn(arg);

                set_cpus_allowed(current, cpus_allowed);
        }
}
536
537static struct trace_page *
538tage_alloc(int gfp)
539{
540 struct page *page;
541 struct trace_page *tage;
542
543 page = alloc_pages(gfp | __GFP_NOWARN, 0);
544 if (page == NULL)
545 return NULL;
546
547 tage = kmalloc(sizeof(*tage), gfp);
548 if (tage == NULL) {
549 __free_pages(page, 0);
550 return NULL;
551 }
552
553 tage->page = page;
554 atomic_inc(&trace_tage_allocated);
555
556 return tage;
557}
558
/* Release one trace page: backing page, descriptor, and its slot in the
 * global allocation count. */
static void
tage_free(struct trace_page *tage)
{
        __ASSERT(tage != NULL);
        __ASSERT(tage->page != NULL);

        __free_pages(tage->page, 0);
        kfree(tage);
        atomic_dec(&trace_tage_allocated);
}
569
/* Convert an embedded list_head back to its containing trace_page. */
static struct trace_page *
tage_from_list(struct list_head *list)
{
        return list_entry(list, struct trace_page, linkage);
}

/* Move 'tage' to the tail of 'queue' (it may currently be on any list). */
static void
tage_to_tail(struct trace_page *tage, struct list_head *queue)
{
        __ASSERT(tage != NULL);
        __ASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}
584
/* try to return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        /* Fast path: the current tail page still has room. */
        if (tcd->tcd_cur_pages > 0) {
                __ASSERT(!list_empty(&tcd->tcd_pages));
                tage = tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        /* Below the page quota: prefer a pre-allocated stock page,
         * otherwise allocate atomically (we may be in interrupt context). */
        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
                        tcd->tcd_cur_stock_pages--;
                        list_del_init(&tage->linkage);
                } else {
                        tage = tage_alloc(GFP_ATOMIC);
                        if (tage == NULL) {
                                printk(KERN_WARNING
                                       "failure to allocate a tage (%ld)\n",
                                       tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                return tage;
        }

        /* Quota exhausted and the tail page is full. */
        return NULL;
}
624
/* return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        __ASSERT(len <= PAGE_SIZE);

        tage = trace_get_tage_try(tcd, len);
        if (tage)
                return tage;

        /* At the quota: the buffer behaves as a ring — recycle the oldest
         * page, discarding its contents. */
        if (tcd->tcd_cur_pages > 0) {
                tage = tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                tage_to_tail(tage, &tcd->tcd_pages);
        }

        return tage;
}
645
/*
 * Core of the CDEBUG()/CERROR() machinery.  Formats a message described
 * by up to two format strings ('format1' with 'args', plus varargs
 * 'format2') directly into the current cpu's trace buffer and optionally
 * echoes it to the console, rate-limited through 'cdls'.
 *
 * Returns 1 when console output was suppressed (not requested, or
 * rate-limited), 0 when the message reached the console.
 */
int
spl_debug_vmsg(spl_debug_limit_state_t *cdls, int subsys, int mask,
               const char *file, const char *fn, const int line,
               const char *format1, va_list args, const char *format2, ...)
{
        struct trace_cpu_data *tcd = NULL;
        struct spl_debug_header header;
        struct trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* average message length */
        int max_nob;
        va_list ap;
        int i;
        int remain;

        /* Record only the basename of the source file. */
        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        trace_set_debug_header(&header, subsys, mask, line, CDEBUG_STACK());

        tcd = trace_get_tcd();
        if (tcd == NULL) /* no trace buffer: console path only */
                goto console;

        if (tcd->tcd_shutting_down) {
                trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        /* Fixed overhead preceding the text: file name, optional function
         * name (both NUL-terminated), and the binary header. */
        known_size = strlen(file) + 1;
        if (fn)
                known_size += strlen(fn) + 1;

        if (spl_debug_binary)
                known_size += sizeof(header);

        /* '2' used because vsnprintf returns real size required for output
         * _without_ terminating NULL.  The first pass formats with the
         * guessed size; if the text did not fit, the second pass retries
         * on a page sized for the true 'needed'. */
        for (i = 0; i < 2; i++) {
                tage = trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;

                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
                        mask |= D_ERROR;
                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf+needed, remain, format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob) /* everything fit */
                        break;
        }

        if (unlikely(*(string_buf + needed - 1) != '\n'))
                printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
                       file, line, fn);

        /* Commit the record: binary header, file, function, then text. */
        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (spl_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (fn) {
                strcpy(debug_buf, fn);
                tage->used += strlen(fn) + 1;
                debug_buf += strlen(fn) + 1;
        }

        __ASSERT(debug_buf == string_buf);

        tage->used += needed;
        __ASSERT (tage->used <= PAGE_SIZE);

console:
        if ((mask & spl_debug_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (spl_console_ratelimit && cdls->cdls_next != 0 &&
                    !time_before(cdls->cdls_next, jiffies)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                trace_put_tcd(tcd);
                        return 1;
                }

                if (time_before(cdls->cdls_next + spl_console_max_delay +
                                (10 * HZ), jiffies)) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= spl_console_backoff * 4;
                } else {
                        /* bursty: back off, clamped to [min, max] delay */
                        cdls->cdls_delay *= spl_console_backoff;

                        if (cdls->cdls_delay < spl_console_min_delay)
                                cdls->cdls_delay = spl_console_min_delay;
                        else if (cdls->cdls_delay > spl_console_max_delay)
                                cdls->cdls_delay = spl_console_max_delay;
                }

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                /* The buffered copy is still addressable; print it. */
                trace_print_to_console(&header, mask, string_buf, needed, file, fn);
                trace_put_tcd(tcd);
        } else {
                /* No trace buffer: format again into a per-cpu console
                 * scratch buffer. */
                string_buf = trace_get_console_buffer();

                needed = 0;
                if (format1 != NULL) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
                        va_end(ap);
                }
                if (format2 != NULL) {
                        remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf+needed, remain, format2, ap);
                                va_end(ap);
                        }
                }
                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                /* Report how many similar messages were rate-limited. */
                string_buf = trace_get_console_buffer();

                needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");

                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(spl_debug_vmsg);
838
/* Do the collect_pages job on a single CPU: assumes that all other
 * CPUs have been stopped during a panic. If this isn't true for
 * some arch, this will have to be implemented separately in each arch.
 */
static void
panic_collect_pages(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, j;

        /* No locking: every other cpu is presumed halted by the panic. */
        tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;
        }
}
854
/* Per-cpu worker for collect_pages(): move this cpu's buffered pages
 * (all contexts) onto the shared collection list under pc_lock. */
static void
collect_pages_on_cpu(void *info)
{
        struct trace_cpu_data *tcd;
        struct page_collection *pc = info;
        int i;

        spin_lock(&pc->pc_lock);
        tcd_for_each_type_lock(tcd, i) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;
        }
        spin_unlock(&pc->pc_lock);
}
869
870static void
871collect_pages(struct page_collection *pc)
872{
873 INIT_LIST_HEAD(&pc->pc_pages);
874
875 if (spl_panic_in_progress)
876 panic_collect_pages(pc);
877 else
878 trace_call_on_all_cpus(collect_pages_on_cpu, pc);
879}
880
/* Per-cpu worker for put_pages_back(): reclaim from the collection every
 * page that originated on this cpu, inserting it ahead of the pages the
 * cpu has buffered since (so older records stay older). */
static void
put_pages_back_on_cpu(void *info)
{
        struct page_collection *pc = info;
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i;

        spin_lock(&pc->pc_lock);
        tcd_for_each_type_lock(tcd, i) {
                /* Remember the current first page; reclaimed pages are
                 * queued in front of it. */
                cur_head = tcd->tcd_pages.next;

                list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {

                        __ASSERT_TAGE_INVARIANT(tage);

                        /* Only pages stamped with this cpu and context. */
                        if (tage->cpu != smp_processor_id() || tage->type != i)
                                continue;

                        tage_to_tail(tage, cur_head);
                        tcd->tcd_cur_pages++;
                }
        }
        spin_unlock(&pc->pc_lock);
}
908
909static void
910put_pages_back(struct page_collection *pc)
911{
912 if (!spl_panic_in_progress)
913 trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
914}
915
916static struct file *
917trace_filp_open (const char *name, int flags, int mode, int *err)
918{
919 struct file *filp = NULL;
920 int rc;
921
922 filp = filp_open(name, flags, mode);
923 if (IS_ERR(filp)) {
924 rc = PTR_ERR(filp);
925 printk(KERN_ERR "SPL: Can't open %s file: %d\n", name, rc);
926 if (err)
927 *err = rc;
928 filp = NULL;
929 }
930 return filp;
931}
932
/* Thin wrappers over the VFS file_operations used to write the debug
 * log; going through f_op keeps the dump filesystem-agnostic.  The fsync
 * wrapper passes datasync=1; trace_filp_poff() yields the &f_pos pointer
 * that ->write() advances. */
#define trace_filp_write(fp, b, s, p) (fp)->f_op->write((fp), (b), (s), p)
#define trace_filp_fsync(fp) (fp)->f_op->fsync((fp),(fp)->f_dentry,1)
#define trace_filp_close(f) filp_close(f, NULL)
#define trace_filp_poff(f) (&(f)->f_pos)
937
/*
 * Write every buffered trace page to 'filename' and free the written
 * pages.  O_EXCL means an existing dump is never overwritten (-EEXIST is
 * treated as a quiet failure).  Returns 0 on success or a negative errno.
 */
static int
spl_debug_dump_all_pages(char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct trace_page *tage;
        struct trace_page *tmp;
        mm_segment_t oldfs;
        int rc = 0;

        down_write(&trace_sem);

        filp = trace_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
                               0600, &rc);
        if (filp == NULL) {
                if (rc != -EEXIST)
                        printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
                               filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* The pages live in kernel memory, so lift the usercopy address
         * limit around the f_op->write() calls. */
        oldfs = get_fs();
        set_fs(get_ds());

        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                __ASSERT_TAGE_INVARIANT(tage);

                rc = trace_filp_write(filp, page_address(tage->page),
                                      tage->used, trace_filp_poff(filp));
                if (rc != (int)tage->used) {
                        /* Short write: return the unwritten pages to the
                         * per-cpu buffers and give up. */
                        printk(KERN_WARNING "SPL: Wanted to write %u "
                               "but wrote %d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __ASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                tage_free(tage);
        }

        set_fs(oldfs);

        rc = trace_filp_fsync(filp);
        if (rc)
                printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
 close:
        trace_filp_close(filp);
 out:
        up_write(&trace_sem);

        return rc;
}
997
998static void
999spl_debug_flush_pages(void)
1000{
1001 struct page_collection pc;
1002 struct trace_page *tage;
1003 struct trace_page *tmp;
1004
1005 spin_lock_init(&pc.pc_lock);
1006
1007 collect_pages(&pc);
1008 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1009 __ASSERT_TAGE_INVARIANT(tage);
1010 list_del(&tage->linkage);
1011 tage_free(tage);
1012 }
1013}
1014
1015unsigned long
1016spl_debug_set_mask(unsigned long mask) {
1017 spl_debug_mask = mask;
1018 return 0;
1019}
1020EXPORT_SYMBOL(spl_debug_set_mask);
1021
1022unsigned long
1023spl_debug_get_mask(void) {
1024 return spl_debug_mask;
1025}
1026EXPORT_SYMBOL(spl_debug_get_mask);
1027
1028unsigned long
1029spl_debug_set_subsys(unsigned long subsys) {
1030 spl_debug_subsys = subsys;
1031 return 0;
1032}
1033EXPORT_SYMBOL(spl_debug_set_subsys);
1034
1035unsigned long
1036spl_debug_get_subsys(void) {
1037 return spl_debug_subsys;
1038}
1039EXPORT_SYMBOL(spl_debug_get_subsys);
1040
/*
 * Resize the total trace buffer to 'mb' megabytes, split evenly across
 * cpus and then among contexts by tcd_pages_factor.  Rejects sizes below
 * one megabyte per possible cpu or above trace_max_debug_mb().  Note:
 * pages already allocated are not trimmed here; the new limit takes
 * effect as pages are recycled.
 */
int
spl_debug_set_mb(int mb)
{
        int i, j, pages;
        int limit = trace_max_debug_mb();
        struct trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
                       "%dMB - lower limit is %d\n", mb, num_possible_cpus());
                return -EINVAL;
        }

        if (mb > limit) {
                printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
                       "%dMB - upper limit is %d\n", mb, limit);
                return -EINVAL;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        down_write(&trace_sem);

        tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        up_write(&trace_sem);

        return 0;
}
EXPORT_SYMBOL(spl_debug_set_mb);
1073
1074int
1075spl_debug_get_mb(void)
1076{
1077 int i, j;
1078 struct trace_cpu_data *tcd;
1079 int total_pages = 0;
1080
1081 down_read(&trace_sem);
1082
1083 tcd_for_each(tcd, i, j)
1084 total_pages += tcd->tcd_max_pages;
1085
1086 up_read(&trace_sem);
1087
1088 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1089}
1090EXPORT_SYMBOL(spl_debug_get_mb);
1091
/* Log the kernel stack of 'tsk' (or of the current task when NULL). */
void spl_debug_dumpstack(struct task_struct *tsk)
{
        /* NOTE(review): show_task() is declared locally because it has no
         * header; confirm the running kernel actually exports it. */
        extern void show_task(struct task_struct *);

        if (tsk == NULL)
                tsk = current;

        CWARN("showing stack for process %d\n", tsk->pid);
        show_task(tsk);
}
EXPORT_SYMBOL(spl_debug_dumpstack);
1103
/*
 * Called on an unrecoverable assertion failure (SBUG).  Records the
 * event, dumps the stack and the debug log, then either panics (the
 * default) or parks the task forever.  Never returns.
 */
void spl_debug_bug(char *file, const char *func, const int line)
{
        spl_debug_catastrophe = 1;
        spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SBUG\n");

        /* Cannot sleep or spawn the dump thread from interrupt context. */
        if (in_interrupt()) {
                panic("SBUG in interrupt.\n");
                /* not reached */
        }

        /* Ensure all debug pages are dumped by the current cpu */
        if (spl_debug_panic_on_bug)
                spl_panic_in_progress = 1;

        spl_debug_dumpstack(NULL);
        spl_debug_dumplog();

        if (spl_debug_panic_on_bug)
                panic("SBUG");

        /* Not panicking: park this task uninterruptibly forever so the
         * damaged context never runs again. */
        set_task_state(current, TASK_UNINTERRUPTIBLE);
        while (1)
                schedule();
}
EXPORT_SYMBOL(spl_debug_bug);
1129
/* Public wrapper that throws away all buffered debug records. */
int
spl_debug_clear_buffer(void)
{
        spl_debug_flush_pages();
        return 0;
}
EXPORT_SYMBOL(spl_debug_clear_buffer);
1137
/* Inject a visually distinctive marker message into the debug log so a
 * point in time can be located when reading a dump. */
int
spl_debug_mark_buffer(char *text)
{
        CDEBUG(D_WARNING, "*************************************\n");
        CDEBUG(D_WARNING, "DEBUG MARKER: %s\n", text);
        CDEBUG(D_WARNING, "*************************************\n");

        return 0;
}
EXPORT_SYMBOL(spl_debug_mark_buffer);
1148
1149static int
1150panic_notifier(struct notifier_block *self,
1151 unsigned long unused1, void *unused2)
1152{
1153 if (spl_panic_in_progress)
1154 return 0;
1155
1156 spl_panic_in_progress = 1;
1157 mb();
1158
1159 if (!in_interrupt()) {
1160 while (current->lock_depth >= 0)
1161 unlock_kernel();
1162
1163 spl_debug_dumplog_internal((void *)(long)current->pid);
1164 }
1165
1166 return 0;
1167}
1168
/*
 * Allocate the per-context trace_data arrays and the per-cpu console
 * scratch buffers, and give every per-cpu descriptor its pages_factor
 * share of the 'max_pages' budget.  On any allocation failure everything
 * allocated so far is torn down and -ENOMEM is returned.
 */
static int
trace_init(int max_pages)
{
        struct trace_cpu_data *tcd;
        int i, j;

        init_rwsem(&trace_sem);

        /* initialize trace_data */
        memset(trace_data, 0, sizeof(trace_data));
        for (i = 0; i < TCD_TYPE_MAX; i++) {
                trace_data[i] = kmalloc(sizeof(union trace_data_union) *
                                        NR_CPUS, GFP_KERNEL);
                if (trace_data[i] == NULL)
                        goto out;
        }

        tcd_for_each(tcd, i, j) {
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
                tcd->tcd_shutting_down = 0;
        }

        /* One scratch buffer per context (irq/softirq/process) per cpu. */
        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        trace_console_buffers[i][j] =
                                kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);

                        if (trace_console_buffers[i][j] == NULL)
                                goto out;
                }
        }

        return 0;
out:
        /* NOTE(review): trace_fini() walks the tcd structures; when the
         * trace_data allocation itself failed this relies on the walk
         * tolerating NULL slots — confirm the tcd_for_each* macros handle
         * a partially initialized array. */
        trace_fini();
        printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
        return -ENOMEM;
}
1215
/*
 * Module-load entry point for the debug subsystem: choose a trace buffer
 * size, allocate the buffers, and hook the kernel panic notifier chain.
 * Returns 0 or a negative errno from trace_init().
 */
int
debug_init(void)
{
        int rc, max = spl_debug_mb;

        spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
        spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;

        /* If spl_debug_mb is set to an invalid value or uninitialized
         * then just make the total buffers smp_num_cpus TCD_MAX_PAGES.
         * The upper sanity bound is 1/5 of physical memory (the extra
         * '- 2' in the shift scales num_physpages to 4 MB units). */
        if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
            max >= 512 || max < 0) {
                max = TCD_MAX_PAGES;
        } else {
                /* NOTE(review): divides by num_online_cpus() while
                 * spl_debug_set_mb() uses num_possible_cpus(); the two can
                 * disagree on hotplug systems — confirm intent. */
                max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
        }

        rc = trace_init(max);
        if (rc)
                return rc;

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &spl_panic_notifier);
        return rc;
}
1241
/* Per-cpu worker for trace_fini(): mark each context shutting down (so
 * spl_debug_vmsg() stops buffering into it) and free all of its pages. */
static void
trace_cleanup_on_cpu(void *info)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i;

        tcd_for_each_type_lock(tcd, i) {
                tcd->tcd_shutting_down = 1;

                list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                        __ASSERT_TAGE_INVARIANT(tage);

                        list_del(&tage->linkage);
                        tage_free(tage);
                }
                tcd->tcd_cur_pages = 0;
        }
}
1262
/* Tear down everything trace_init() built: buffered pages, per-cpu
 * console scratch buffers, and the trace_data arrays.  trace_data is
 * allocated in index order, so stopping at the first NULL slot also
 * covers a partially initialized state. */
static void
trace_fini(void)
{
        int i, j;

        trace_call_on_all_cpus(trace_cleanup_on_cpu, NULL);

        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        if (trace_console_buffers[i][j] != NULL) {
                                kfree(trace_console_buffers[i][j]);
                                trace_console_buffers[i][j] = NULL;
                        }
                }
        }

        for (i = 0; trace_data[i] != NULL; i++) {
                kfree(trace_data[i]);
                trace_data[i] = NULL;
        }
}
1284
1285void
1286debug_fini(void)
1287{
1288 atomic_notifier_chain_unregister(&panic_notifier_list,
1289 &spl_panic_notifier);
1290 trace_fini();
1291
1292 return;
1293}