]> git.proxmox.com Git - mirror_spl.git/blame - modules/spl/spl-debug.c
Go through and add a header with the proper UCRL number.
[mirror_spl.git] / modules / spl / spl-debug.c
CommitLineData
715f6251 1/*
2 * This file is part of the SPL: Solaris Porting Layer.
57d1b188 3 *
715f6251 4 * This file was originally part of Lustre, http://www.lustre.org.
5 * but has subsequently been adapted for use in the SPL in
6 * accordance with the GPL.
57d1b188 7 *
715f6251 8 * Copyright (C) 2004 Cluster File Systems, Inc.
9 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
10 * Produced at Lawrence Livermore National Laboratory
11 * Written by:
12 * Zach Brown <zab@clusterfs.com>
13 * Phil Schwan <phil@clusterfs.com>
14 * Brian Behlendorf <behlendorf1@llnl.gov>,
15 * Herb Wartens <wartens2@llnl.gov>,
16 * Jim Garlick <garlick@llnl.gov>
17 * UCRL-CODE-235197
57d1b188 18 *
715f6251 19 * This is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
57d1b188 23 *
715f6251 24 * This is distributed in the hope that it will be useful, but WITHOUT
25 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 * for more details.
57d1b188 28 *
715f6251 29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
57d1b188 32 */
33
34#include <linux/kmod.h>
35#include <linux/mm.h>
36#include <linux/vmalloc.h>
37#include <linux/pagemap.h>
38#include <linux/slab.h>
39#include <linux/ctype.h>
40#include <linux/kthread.h>
41#include <linux/hardirq.h>
42#include <linux/interrupt.h>
43#include <sys/sysmacros.h>
44#include <sys/proc.h>
45#include <sys/debug.h>
46#include <spl-ctl.h>
47#include "config.h"
48
49#ifdef DEBUG_SUBSYSTEM
50#undef DEBUG_SUBSYSTEM
51#endif
52
53#define DEBUG_SUBSYSTEM S_DEBUG
54
/* Mask of enabled debug subsystems (S_* bits); all subsystems on by default. */
unsigned long spl_debug_subsys = ~0;
EXPORT_SYMBOL(spl_debug_subsys);
module_param(spl_debug_subsys, long, 0644);
MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");

/* Mask of debug message types (D_* bits) recorded into the trace buffer. */
unsigned long spl_debug_mask = (D_EMERG | D_ERROR | D_WARNING | D_CONSOLE);
EXPORT_SYMBOL(spl_debug_mask);
module_param(spl_debug_mask, long, 0644);
MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");

/* Mask of message types additionally echoed to the console via printk(). */
unsigned long spl_debug_printk = D_CANTMASK;
EXPORT_SYMBOL(spl_debug_printk);
module_param(spl_debug_printk, long, 0644);
MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");

/* Requested total trace buffer size in MB; -1 selects the built-in default. */
int spl_debug_mb = -1;
EXPORT_SYMBOL(spl_debug_mb);
module_param(spl_debug_mb, int, 0644);
MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");

/* Non-zero: prefix each trace record with a binary spl_debug_header. */
unsigned int spl_debug_binary = 1;
EXPORT_SYMBOL(spl_debug_binary);

/* Set once spl_debug_bug() has fired; indicates debug state is suspect. */
unsigned int spl_debug_catastrophe;
EXPORT_SYMBOL(spl_debug_catastrophe);

/* Non-zero: panic the system when an SBUG assertion fails. */
unsigned int spl_debug_panic_on_bug = 1;
EXPORT_SYMBOL(spl_debug_panic_on_bug);
module_param(spl_debug_panic_on_bug, int, 0644);
MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");

/* Full dump-file name ("<path>.<secs>.<pid>") built from the path below. */
static char spl_debug_file_name[PATH_MAX];
char spl_debug_file_path[PATH_MAX] = "/var/dumps/spl-log";

/* Non-zero: rate-limit repeated console messages (see cdls handling). */
unsigned int spl_console_ratelimit = 1;
EXPORT_SYMBOL(spl_console_ratelimit);

/* Upper/lower bounds (jiffies) for the console rate-limit backoff delay. */
long spl_console_max_delay;
EXPORT_SYMBOL(spl_console_max_delay);

long spl_console_min_delay;
EXPORT_SYMBOL(spl_console_min_delay);

/* Multiplier applied to the rate-limit delay each time a message repeats. */
unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
EXPORT_SYMBOL(spl_console_backoff);

unsigned int spl_debug_stack;
EXPORT_SYMBOL(spl_debug_stack);

/* Set during a panic/SBUG so page collection stays on the current CPU. */
static int spl_panic_in_progress;

/* Per-context (proc/softirq/irq), per-cpu trace buffers and console
 * scratch buffers; trace_sem serializes whole-buffer operations. */
union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
char *trace_console_buffers[NR_CPUS][3];
struct rw_semaphore trace_sem;
atomic_t trace_tage_allocated = ATOMIC_INIT(0);
110
/* Forward declarations for routines defined later in this file. */
static int panic_notifier(struct notifier_block *, unsigned long, void *);
static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
static void trace_fini(void);


/* Memory percentage breakdown by type */
static unsigned int pages_factor[TCD_TYPE_MAX] = {
        80,     /* 80% pages for TCD_TYPE_PROC */
        10,     /* 10% pages for TCD_TYPE_SOFTIRQ */
        10      /* 10% pages for TCD_TYPE_IRQ */
};
122
123static struct notifier_block spl_panic_notifier = {
124 notifier_call: panic_notifier,
125 next: NULL,
126 priority: 10000
127};
128
129const char *
130spl_debug_subsys2str(int subsys)
131{
132 switch (subsys) {
133 default:
134 return NULL;
135 case S_UNDEFINED:
136 return "undefined";
137 case S_ATOMIC:
138 return "atomic";
139 case S_KOBJ:
140 return "kobj";
141 case S_VNODE:
142 return "vnode";
143 case S_TIME:
144 return "time";
145 case S_RWLOCK:
146 return "rwlock";
147 case S_THREAD:
148 return "thread";
149 case S_CONDVAR:
150 return "condvar";
151 case S_MUTEX:
152 return "mutex";
153 case S_RNG:
154 return "rng";
155 case S_TASKQ:
156 return "taskq";
157 case S_KMEM:
158 return "kmem";
e5bbd245 159 case S_DEBUG:
160 return "debug";
161 case S_GENERIC:
162 return "generic";
163 case S_PROC:
164 return "proc";
165 case S_MODULE:
166 return "module";
57d1b188 167 }
168}
169
170const char *
171spl_debug_dbg2str(int debug)
172{
173 switch (debug) {
174 default:
175 return NULL;
176 case D_TRACE:
177 return "trace";
178 case D_INFO:
179 return "info";
180 case D_WARNING:
181 return "warning";
182 case D_ERROR:
183 return "error";
184 case D_EMERG:
185 return "emerg";
186 case D_CONSOLE:
187 return "console";
188 case D_IOCTL:
189 return "ioctl";
190 case D_DPRINTF:
191 return "dprintf";
192 case D_OTHER:
193 return "other";
194 }
195}
196
/* Render 'mask' as a space-separated list of token names into 'str'
 * (subsystem names when 'is_subsys', debug-level names otherwise).
 * A zero mask renders as "0".  'str' is always NUL-terminated within
 * 'size' bytes.  Returns the length the full rendering requires, which
 * may exceed 'size' (snprintf-style truncation semantics). */
int
spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
{
        const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
                                                 spl_debug_dbg2str;
        const char *token;
        int i, bit, len = 0;

        if (mask == 0) {                        /* "0" */
                if (size > 0)
                        str[0] = '0';
                len = 1;
        } else {                                /* space-separated tokens */
                for (i = 0; i < 32; i++) {
                        bit = 1 << i;

                        if ((mask & bit) == 0)
                                continue;

                        token = fn(bit);
                        if (token == NULL)      /* unused bit */
                                continue;

                        if (len > 0) {          /* separator? */
                                if (len < size)
                                        str[len] = ' ';
                                len++;
                        }

                        /* copy the token, counting even past 'size' */
                        while (*token != 0) {
                                if (len < size)
                                        str[len] = *token;
                                token++;
                                len++;
                        }
                }
        }

        /* terminate 'str' */
        if (len < size)
                str[len] = 0;
        else
                str[size - 1] = 0;

        return len;
}
243
/* Look up the mask bit whose token name matches the first 'len' bytes
 * of 'str' (case-insensitive for upper-case input; token tables are
 * lower-case).  On success stores the bit in '*mask' and returns 0;
 * returns -EINVAL when no token matches. */
static int
spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
{
        const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
                                                 spl_debug_dbg2str;
        const char *token;
        int i, j, bit;

        /* match against known tokens */
        for (i = 0; i < 32; i++) {
                bit = 1 << i;

                token = fn(bit);
                if (token == NULL)              /* unused? */
                        continue;

                /* open-coded length-bounded strcasecmp */
                for (j = 0; ; j++) {
                        if (j == len) {         /* end of token */
                                if (token[j] == 0) {
                                        *mask = bit;
                                        return 0;
                                }
                                break;
                        }

                        if (token[j] == 0)
                                break;

                        if (str[j] == token[j])
                                continue;

                        /* only fold A-Z to lower case; anything else
                         * is a mismatch */
                        if (str[j] < 'A' || 'Z' < str[j])
                                break;

                        if (str[j] - 'A' + 'a' != token[j])
                                break;
                }
        }

        return -EINVAL;                         /* no match */
}
286
/* Parse a user-supplied mask string into '*mask'.  Accepts either a
 * plain number (backwards compatibility) or a whitespace-separated
 * token list with optional '+'/'-' operators.  Returns 0 on success
 * or -EINVAL on malformed input. */
int
spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
{
        char op = 0;
        int m = 0, matched, n, t;

        /* Allow a number for backwards compatibility */
        for (n = strlen(str); n > 0; n--)
                if (!isspace(str[n-1]))
                        break;
        matched = n;

        /* whole (trimmed) string parses as a number? use it directly */
        if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
                *mask = m;
                return 0;
        }

        /* <str> must be a list of debug tokens or numbers separated by
         * whitespace and optionally an operator ('+' or '-').  If an operator
         * appears first in <str>, '*mask' is used as the starting point
         * (relative), otherwise 0 is used (absolute).  An operator applies to
         * all following tokens up to the next operator. */
        matched = 0;
        while (*str != 0) {
                while (isspace(*str))           /* skip whitespace */
                        str++;

                if (*str == 0)
                        break;

                if (*str == '+' || *str == '-') {
                        op = *str++;

                        /* op on first token == relative */
                        if (!matched)
                                m = *mask;

                        while (isspace(*str))   /* skip whitespace */
                                str++;

                        if (*str == 0)          /* trailing op */
                                return -EINVAL;
                }

                /* find token length */
                for (n = 0; str[n] != 0 && !isspace(str[n]); n++);

                /* match token */
                if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
                        return -EINVAL;

                matched = 1;
                if (op == '-')
                        m &= ~t;
                else
                        m |= t;

                str += n;
        }

        if (!matched)
                return -EINVAL;

        *mask = m;
        return 0;
}
353
57d1b188 354static void
355spl_debug_dumplog_internal(dumplog_priv_t *dp)
356{
357 void *journal_info;
358
359 journal_info = current->journal_info;
360 current->journal_info = NULL;
361
362 snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
363 "%s.%ld.%ld", spl_debug_file_path,
364 get_seconds(), (long)dp->dp_pid);
365 printk(KERN_ALERT "SPL: dumping log to %s\n", spl_debug_file_name);
7fea96c0 366 spl_debug_dump_all_pages(dp, spl_debug_file_name);
57d1b188 367
368 current->journal_info = journal_info;
369}
370
/* Kthread body: perform the dump, flag completion, and wake the
 * requester blocked in spl_debug_dumplog(). */
static int
spl_debug_dumplog_thread(void *arg)
{
        dumplog_priv_t *dp = (dumplog_priv_t *)arg;

        spl_debug_dumplog_internal(dp);
        /* dp lives on the requester's stack; set done before waking so
         * the waiter can safely return and reclaim it */
        atomic_set(&dp->dp_done, 1);
        wake_up(&dp->dp_waitq);
        do_exit(0);

        return 0; /* Unreachable */
}
383
7fea96c0 384/* When flag is set do not use a new thread for the debug dump */
57d1b188 385int
7fea96c0 386spl_debug_dumplog(int flags)
57d1b188 387{
388 struct task_struct *tsk;
389 dumplog_priv_t dp;
57d1b188 390
7fea96c0 391 init_waitqueue_head(&dp.dp_waitq);
392 dp.dp_pid = current->pid;
393 dp.dp_flags = flags;
394 atomic_set(&dp.dp_done, 0);
57d1b188 395
7fea96c0 396 if (dp.dp_flags & DL_NOTHREAD) {
397 spl_debug_dumplog_internal(&dp);
398 } else {
57d1b188 399
7fea96c0 400 tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
401 if (tsk == NULL)
402 return -ENOMEM;
403
404 wake_up_process(tsk);
405 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
406 }
57d1b188 407
a8ac0b89 408 return 0;
57d1b188 409}
410EXPORT_SYMBOL(spl_debug_dumplog);
411
412static char *
413trace_get_console_buffer(void)
414{
415 int cpu = get_cpu();
416 int idx;
417
418 if (in_irq()) {
419 idx = 0;
420 } else if (in_softirq()) {
421 idx = 1;
422 } else {
423 idx = 2;
424 }
425
426 return trace_console_buffers[cpu][idx];
427}
428
/* Release the CPU pin taken by trace_get_console_buffer(); the buffer
 * pointer itself needs no cleanup. */
static void
trace_put_console_buffer(char *buffer)
{
        put_cpu();
}
434
435static struct trace_cpu_data *
436trace_get_tcd(void)
437{
438 int cpu;
439
440 cpu = get_cpu();
441 if (in_irq())
442 return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
443 else if (in_softirq())
444 return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
445
446 return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
447}
448
/* Release the CPU pin taken by trace_get_tcd(). */
static void
trace_put_tcd(struct trace_cpu_data *tcd)
{
        put_cpu();
}
454
/* "Lock" a tcd by masking the contexts that could preempt it on this
 * CPU: hard irqs for the irq tcd, bottom halves for the softirq tcd.
 * Process-context tcds need nothing beyond the caller's CPU pin.
 * Always returns 1 (lock acquired). */
static int
trace_lock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        if (tcd->tcd_type == TCD_TYPE_IRQ)
                local_irq_disable();
        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
                local_bh_disable();

        return 1;
}
467
/* Undo trace_lock_tcd(): re-enable whichever context class was masked
 * for this tcd type. */
static void
trace_unlock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        if (tcd->tcd_type == TCD_TYPE_IRQ)
                local_irq_enable();
        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
                local_bh_enable();
}
478
479static void
480trace_set_debug_header(struct spl_debug_header *header, int subsys,
481 int mask, const int line, unsigned long stack)
482{
483 struct timeval tv;
484
485 do_gettimeofday(&tv);
486
487 header->ph_subsys = subsys;
488 header->ph_mask = mask;
489 header->ph_cpu_id = smp_processor_id();
490 header->ph_sec = (__u32)tv.tv_sec;
491 header->ph_usec = tv.tv_usec;
492 header->ph_stack = stack;
493 header->ph_pid = current->pid;
494 header->ph_line_num = line;
495
496 return;
497}
498
/* Emit a formatted message to the kernel console.  The printk level
 * and "SPL"/"SPLError" prefix are chosen from the message mask; plain
 * D_CONSOLE messages omit the pid/stack/file/line decoration.
 * NOTE(review): 'ptype' is only assigned when one of the mask tests
 * matches — callers are expected to pass a mask that satisfies at
 * least the final (D_CONSOLE | spl_debug_printk) test. */
static void
trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
                       int len, const char *file, const char *fn)
{
        char *prefix = "SPL", *ptype = NULL;

        if ((mask & D_EMERG) != 0) {
                prefix = "SPLError";
                ptype = KERN_EMERG;
        } else if ((mask & D_ERROR) != 0) {
                prefix = "SPLError";
                ptype = KERN_ERR;
        } else if ((mask & D_WARNING) != 0) {
                prefix = "SPL";
                ptype = KERN_WARNING;
        } else if ((mask & (D_CONSOLE | spl_debug_printk)) != 0) {
                prefix = "SPL";
                ptype = KERN_INFO;
        }

        if ((mask & D_CONSOLE) != 0) {
                printk("%s%s: %.*s", ptype, prefix, len, buf);
        } else {
                printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
                       hdr->ph_pid, hdr->ph_stack, file,
                       hdr->ph_line_num, fn, len, buf);
        }

        return;
}
529
/* Upper bound for the debug buffer size: 80% of physical RAM expressed
 * in MB, but never less than 512. */
static int
trace_max_debug_mb(void)
{
        return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
}
535
/* Run 'fn(arg)' once on every online CPU by temporarily restricting
 * the current task's affinity to each CPU in turn, restoring the
 * original cpumask after each call.  Caller must be in a context where
 * migrating between CPUs is safe (process context, no locks held that
 * the callback also takes). */
static void
trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
{
        cpumask_t mask, cpus_allowed = current->cpus_allowed;
        int cpu;

        for_each_online_cpu(cpu) {
                cpus_clear(mask);
                cpu_set(cpu, mask);
                set_cpus_allowed(current, mask);

                fn(arg);

                set_cpus_allowed(current, cpus_allowed);
        }
}
552
553static struct trace_page *
554tage_alloc(int gfp)
555{
556 struct page *page;
557 struct trace_page *tage;
558
559 page = alloc_pages(gfp | __GFP_NOWARN, 0);
560 if (page == NULL)
561 return NULL;
562
563 tage = kmalloc(sizeof(*tage), gfp);
564 if (tage == NULL) {
565 __free_pages(page, 0);
566 return NULL;
567 }
568
569 tage->page = page;
570 atomic_inc(&trace_tage_allocated);
571
572 return tage;
573}
574
575static void
576tage_free(struct trace_page *tage)
577{
578 __ASSERT(tage != NULL);
579 __ASSERT(tage->page != NULL);
580
581 __free_pages(tage->page, 0);
582 kfree(tage);
583 atomic_dec(&trace_tage_allocated);
584}
585
586static struct trace_page *
587tage_from_list(struct list_head *list)
588{
589 return list_entry(list, struct trace_page, linkage);
590}
591
592static void
593tage_to_tail(struct trace_page *tage, struct list_head *queue)
594{
595 __ASSERT(tage != NULL);
596 __ASSERT(queue != NULL);
597
598 list_move_tail(&tage->linkage, queue);
599}
600
/* try to return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        /* reuse the current tail page if it still has room */
        if (tcd->tcd_cur_pages > 0) {
                __ASSERT(!list_empty(&tcd->tcd_pages));
                tage = tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        /* otherwise grow, preferring a pre-allocated stock page over a
         * fresh GFP_ATOMIC allocation */
        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
                        tcd->tcd_cur_stock_pages--;
                        list_del_init(&tage->linkage);
                } else {
                        tage = tage_alloc(GFP_ATOMIC);
                        if (tage == NULL) {
                                printk(KERN_WARNING
                                       "failure to allocate a tage (%ld)\n",
                                       tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                return tage;
        }

        /* at the page limit and the tail is full */
        return NULL;
}
640
/* return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        __ASSERT(len <= PAGE_SIZE);

        tage = trace_get_tage_try(tcd, len);
        if (tage)
                return tage;

        /* buffer is full: recycle the oldest page, discarding its
         * contents (the trace ring overwrites oldest data first) */
        if (tcd->tcd_cur_pages > 0) {
                tage = tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                tage_to_tail(tage, &tcd->tcd_pages);
        }

        return tage;
}
661
/* Core debug-message routine: format a message (optionally from two
 * format strings — 'format1' with the caller-supplied 'args', then
 * 'format2' with this function's own varargs) into the per-cpu trace
 * buffer, and echo it to the console when the mask intersects
 * spl_debug_printk.  'cdls' (may be NULL) carries per-call-site rate
 * limiting state.  Returns 1 when console output was suppressed or
 * not requested, 0 when the message reached the console. */
int
spl_debug_vmsg(spl_debug_limit_state_t *cdls, int subsys, int mask,
               const char *file, const char *fn, const int line,
               const char *format1, va_list args, const char *format2, ...)
{
        struct trace_cpu_data *tcd = NULL;
        struct spl_debug_header header;
        struct trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* average message length */
        int max_nob;
        va_list ap;
        int i;
        int remain;

        /* strip any leading directories from the file name */
        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        trace_set_debug_header(&header, subsys, mask, line, CDEBUG_STACK());

        tcd = trace_get_tcd();
        if (tcd == NULL)
                goto console;

        if (tcd->tcd_shutting_down) {
                trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        /* fixed-size portion of the record: file, function, header */
        known_size = strlen(file) + 1;
        if (fn)
                known_size += strlen(fn) + 1;

        if (spl_debug_binary)
                known_size += sizeof(header);

        /* '2' used because vsnprintf returns real size required for output
         * _without_ terminating NULL.  First pass guesses 'needed';
         * second pass retries with the exact size if it didn't fit. */
        for (i = 0; i < 2; i++) {
                tage = trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;

                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
                        mask |= D_ERROR;
                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf+needed, remain, format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob)
                        break;
        }

        if (unlikely(*(string_buf + needed - 1) != '\n'))
                printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
                       file, line, fn);

        /* commit the record: [header][file\0][fn\0][message] */
        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (spl_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (fn) {
                strcpy(debug_buf, fn);
                tage->used += strlen(fn) + 1;
                debug_buf += strlen(fn) + 1;
        }

        __ASSERT(debug_buf == string_buf);

        tage->used += needed;
        __ASSERT (tage->used <= PAGE_SIZE);

console:
        if ((mask & spl_debug_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                /* rate limiting: skip while inside the current delay window */
                if (spl_console_ratelimit && cdls->cdls_next != 0 &&
                    !time_before(cdls->cdls_next, jiffies)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                trace_put_tcd(tcd);
                        return 1;
                }

                if (time_before(cdls->cdls_next + spl_console_max_delay +
                                (10 * HZ), jiffies)) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= spl_console_backoff * 4;
                } else {
                        /* still chatty: back off, clamped to [min, max] */
                        cdls->cdls_delay *= spl_console_backoff;

                        if (cdls->cdls_delay < spl_console_min_delay)
                                cdls->cdls_delay = spl_console_min_delay;
                        else if (cdls->cdls_delay > spl_console_max_delay)
                                cdls->cdls_delay = spl_console_max_delay;
                }

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                /* message already formatted into the trace page */
                trace_print_to_console(&header, mask, string_buf, needed, file, fn);
                trace_put_tcd(tcd);
        } else {
                /* trace buffer unavailable: format into the per-cpu
                 * console scratch buffer instead */
                string_buf = trace_get_console_buffer();

                needed = 0;
                if (format1 != NULL) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
                        va_end(ap);
                }
                if (format2 != NULL) {
                        remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf+needed, remain, format2, ap);
                                va_end(ap);
                        }
                }
                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
        }

        /* report how many messages the rate limiter swallowed */
        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = trace_get_console_buffer();

                needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");

                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(spl_debug_vmsg);
854
/* Do the collect_pages job on a single CPU: assumes that all other
 * CPUs have been stopped during a panic. If this isn't true for
 * some arch, this will have to be implemented separately in each arch.
 */
static void
collect_pages_from_single_cpu(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, j;

        /* splice every tcd's page list (all types, all CPUs) onto pc */
        tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;
        }
}
870
/* Per-cpu callback for collect_pages(): under pc_lock, move this CPU's
 * trace pages (all tcd types) onto the shared collection list. */
static void
collect_pages_on_cpu(void *info)
{
        struct trace_cpu_data *tcd;
        struct page_collection *pc = info;
        int i;

        spin_lock(&pc->pc_lock);
        tcd_for_each_type_lock(tcd, i) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;
        }
        spin_unlock(&pc->pc_lock);
}
885
886static void
7fea96c0 887collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
57d1b188 888{
889 INIT_LIST_HEAD(&pc->pc_pages);
890
7fea96c0 891 if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
892 collect_pages_from_single_cpu(pc);
57d1b188 893 else
894 trace_call_on_all_cpus(collect_pages_on_cpu, pc);
895}
896
/* Per-cpu callback for put_pages_back(): return any pages on the
 * collection list that originally belonged to this CPU (matching cpu
 * and tcd type) to the head region of their tcd page list. */
static void
put_pages_back_on_cpu(void *info)
{
        struct page_collection *pc = info;
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i;

        spin_lock(&pc->pc_lock);
        tcd_for_each_type_lock(tcd, i) {
                /* remember the current head so returned pages keep
                 * their position ahead of newer entries */
                cur_head = tcd->tcd_pages.next;

                list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {

                        __ASSERT_TAGE_INVARIANT(tage);

                        if (tage->cpu != smp_processor_id() || tage->type != i)
                                continue;

                        tage_to_tail(tage, cur_head);
                        tcd->tcd_cur_pages++;
                }
        }
        spin_unlock(&pc->pc_lock);
}
924
925static void
926put_pages_back(struct page_collection *pc)
927{
928 if (!spl_panic_in_progress)
929 trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
930}
931
932static struct file *
933trace_filp_open (const char *name, int flags, int mode, int *err)
934{
935 struct file *filp = NULL;
936 int rc;
937
938 filp = filp_open(name, flags, mode);
939 if (IS_ERR(filp)) {
940 rc = PTR_ERR(filp);
941 printk(KERN_ERR "SPL: Can't open %s file: %d\n", name, rc);
942 if (err)
943 *err = rc;
944 filp = NULL;
945 }
946 return filp;
947}
948
/* Thin wrappers over the VFS file_operations used when dumping the
 * trace buffer to disk. */
#define trace_filp_write(fp, b, s, p)  (fp)->f_op->write((fp), (b), (s), p)
#define trace_filp_fsync(fp)           (fp)->f_op->fsync((fp),(fp)->f_dentry,1)
#define trace_filp_close(f)            filp_close(f, NULL)
#define trace_filp_poff(f)             (&(f)->f_pos)
953
/* Collect every trace page and write it to 'filename' (created
 * exclusively, mode 0600).  Pages written successfully are freed;
 * on a short write the remaining pages are returned to their CPUs.
 * Serialized against other whole-buffer operations by trace_sem.
 * Returns 0 on success or a negative errno. */
static int
spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct trace_page *tage;
        struct trace_page *tmp;
        mm_segment_t oldfs;
        int rc = 0;

        down_write(&trace_sem);

        filp = trace_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
                               0600, &rc);
        if (filp == NULL) {
                /* O_EXCL collision is silent; anything else is reported */
                if (rc != -EEXIST)
                        printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
                               filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        collect_pages(dp, &pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* allow vfs writes from kernel-space buffers */
        oldfs = get_fs();
        set_fs(get_ds());

        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                __ASSERT_TAGE_INVARIANT(tage);

                rc = trace_filp_write(filp, page_address(tage->page),
                                      tage->used, trace_filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "SPL: Wanted to write %u "
                               "but wrote %d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __ASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                tage_free(tage);
        }

        set_fs(oldfs);

        rc = trace_filp_fsync(filp);
        if (rc)
                printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
 close:
        trace_filp_close(filp);
 out:
        up_write(&trace_sem);

        return rc;
}
1013
/* Discard the entire trace buffer: collect every page from every CPU
 * and free it without writing anything to disk. */
static void
spl_debug_flush_pages(void)
{
        dumplog_priv_t dp;
        struct page_collection pc;
        struct trace_page *tage;
        struct trace_page *tmp;

        spin_lock_init(&pc.pc_lock);
        /* minimal dumplog_priv_t: no flags, so collection runs on all CPUs */
        init_waitqueue_head(&dp.dp_waitq);
        dp.dp_pid = current->pid;
        dp.dp_flags = 0;
        atomic_set(&dp.dp_done, 0);

        collect_pages(&dp, &pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                __ASSERT_TAGE_INVARIANT(tage);
                list_del(&tage->linkage);
                tage_free(tage);
        }
}
1035
1036unsigned long
1037spl_debug_set_mask(unsigned long mask) {
1038 spl_debug_mask = mask;
1039 return 0;
1040}
1041EXPORT_SYMBOL(spl_debug_set_mask);
1042
1043unsigned long
1044spl_debug_get_mask(void) {
1045 return spl_debug_mask;
1046}
1047EXPORT_SYMBOL(spl_debug_get_mask);
1048
1049unsigned long
1050spl_debug_set_subsys(unsigned long subsys) {
1051 spl_debug_subsys = subsys;
1052 return 0;
1053}
1054EXPORT_SYMBOL(spl_debug_set_subsys);
1055
1056unsigned long
1057spl_debug_get_subsys(void) {
1058 return spl_debug_subsys;
1059}
1060EXPORT_SYMBOL(spl_debug_get_subsys);
1061
/* Resize the trace buffer to 'mb' megabytes total, split across CPUs
 * and tcd types per pages_factor.  Rejects sizes below one MB per CPU
 * or above trace_max_debug_mb().  Returns 0 or -EINVAL. */
int
spl_debug_set_mb(int mb)
{
        int i, j, pages;
        int limit = trace_max_debug_mb();
        struct trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
                       "%dMB - lower limit is %d\n", mb, num_possible_cpus());
                return -EINVAL;
        }

        if (mb > limit) {
                printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
                       "%dMB - upper limit is %d\n", mb, limit);
                return -EINVAL;
        }

        /* per-cpu share, converted from MB to pages */
        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        down_write(&trace_sem);

        tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        up_write(&trace_sem);

        return 0;
}
EXPORT_SYMBOL(spl_debug_set_mb);
1094
1095int
1096spl_debug_get_mb(void)
1097{
1098 int i, j;
1099 struct trace_cpu_data *tcd;
1100 int total_pages = 0;
1101
1102 down_read(&trace_sem);
1103
1104 tcd_for_each(tcd, i, j)
1105 total_pages += tcd->tcd_max_pages;
1106
1107 up_read(&trace_sem);
1108
1109 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1110}
1111EXPORT_SYMBOL(spl_debug_get_mb);
1112
/* Print the kernel stack of 'tsk' (or the current task when NULL)
 * via the kernel's show_task() helper. */
void spl_debug_dumpstack(struct task_struct *tsk)
{
        extern void show_task(struct task_struct *);

        if (tsk == NULL)
                tsk = current;

        printk(KERN_ERR "SPL: Showing stack for process %d\n", tsk->pid);
        show_task(tsk);
}
EXPORT_SYMBOL(spl_debug_dumpstack);
1124
7fea96c0 1125void spl_debug_bug(char *file, const char *func, const int line, int flags)
57d1b188 1126{
1127 spl_debug_catastrophe = 1;
937879f1 1128 spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SBUG\n");
57d1b188 1129
1130 if (in_interrupt()) {
937879f1 1131 panic("SBUG in interrupt.\n");
57d1b188 1132 /* not reached */
1133 }
1134
1135 /* Ensure all debug pages and dumped by current cpu */
1136 if (spl_debug_panic_on_bug)
1137 spl_panic_in_progress = 1;
1138
1139 spl_debug_dumpstack(NULL);
7fea96c0 1140 spl_debug_dumplog(flags);
57d1b188 1141
1142 if (spl_debug_panic_on_bug)
937879f1 1143 panic("SBUG");
57d1b188 1144
1145 set_task_state(current, TASK_UNINTERRUPTIBLE);
1146 while (1)
1147 schedule();
1148}
1149EXPORT_SYMBOL(spl_debug_bug);
1150
/* Throw away all buffered trace data.  Always returns 0. */
int
spl_debug_clear_buffer(void)
{
        spl_debug_flush_pages();

        return 0;
}
EXPORT_SYMBOL(spl_debug_clear_buffer);
1158
/* Write a visually distinct marker line containing 'text' into the
 * trace buffer, to help locate a point of interest in a later dump. */
int
spl_debug_mark_buffer(char *text)
{
        CDEBUG(D_WARNING, "*************************************\n");
        CDEBUG(D_WARNING, "DEBUG MARKER: %s\n", text);
        CDEBUG(D_WARNING, "*************************************\n");

        return 0;
}
EXPORT_SYMBOL(spl_debug_mark_buffer);
1169
/* Panic-chain callback: on the first panic, dump the debug log from
 * the current CPU without spawning a thread.  Skipped entirely when
 * panicking from interrupt context. */
static int
panic_notifier(struct notifier_block *self,
               unsigned long unused1, void *unused2)
{
        if (spl_panic_in_progress)
                return 0;

        spl_panic_in_progress = 1;
        mb();   /* make the flag visible before touching shared state */

        if (!in_interrupt()) {
                /* drop the BKL so the dump's filesystem I/O can proceed */
                while (current->lock_depth >= 0)
                        unlock_kernel();

                spl_debug_dumplog(DL_NOTHREAD | DL_SINGLE_CPU);
        }

        return 0;
}
1189
/* Allocate and initialize all per-cpu trace state: the trace_data
 * arrays for each tcd type, each tcd's lists and page budget (split
 * per pages_factor), and the per-cpu console scratch buffers.  On any
 * allocation failure everything is torn down and -ENOMEM returned. */
static int
trace_init(int max_pages)
{
        struct trace_cpu_data *tcd;
        int i, j;

        init_rwsem(&trace_sem);

        /* initialize trace_data */
        memset(trace_data, 0, sizeof(trace_data));
        for (i = 0; i < TCD_TYPE_MAX; i++) {
                trace_data[i] = kmalloc(sizeof(union trace_data_union) *
                                        NR_CPUS, GFP_KERNEL);
                if (trace_data[i] == NULL)
                        goto out;
        }

        tcd_for_each(tcd, i, j) {
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
                tcd->tcd_shutting_down = 0;
        }

        /* one console buffer per cpu per context (irq/softirq/process) */
        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        trace_console_buffers[i][j] =
                                kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);

                        if (trace_console_buffers[i][j] == NULL)
                                goto out;
                }
        }

        return 0;
out:
        trace_fini();
        printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
        return -ENOMEM;
}
1236
/* Module-load entry point for the debug subsystem: pick a per-cpu page
 * budget from spl_debug_mb (falling back to TCD_MAX_PAGES when the
 * value is unset or out of range), initialize the trace machinery, and
 * register the panic notifier.  Returns 0 or a negative errno. */
int
debug_init(void)
{
        int rc, max = spl_debug_mb;

        spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
        spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;

        /* If spl_debug_mb is set to an invalid value or uninitialized
         * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
        if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
            max >= 512 || max < 0) {
                max = TCD_MAX_PAGES;
        } else {
                /* convert MB per cpu to pages per cpu */
                max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
        }

        rc = trace_init(max);
        if (rc)
                return rc;

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &spl_panic_notifier);
        return rc;
}
1262
/* Per-cpu shutdown callback: mark every tcd on this CPU as shutting
 * down (so spl_debug_vmsg stops writing to it) and free its pages. */
static void
trace_cleanup_on_cpu(void *info)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i;

        tcd_for_each_type_lock(tcd, i) {
                tcd->tcd_shutting_down = 1;

                list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                        __ASSERT_TAGE_INVARIANT(tage);

                        list_del(&tage->linkage);
                        tage_free(tage);
                }
                tcd->tcd_cur_pages = 0;
        }
}
1283
/* Tear down all trace state: free every CPU's trace pages, the console
 * scratch buffers, and the trace_data arrays.  Safe to call on a
 * partially-initialized state (trace_init's error path relies on the
 * NULL checks and NULL-terminated trace_data scan). */
static void
trace_fini(void)
{
        int i, j;

        trace_call_on_all_cpus(trace_cleanup_on_cpu, NULL);

        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        if (trace_console_buffers[i][j] != NULL) {
                                kfree(trace_console_buffers[i][j]);
                                trace_console_buffers[i][j] = NULL;
                        }
                }
        }

        /* trace_data was zeroed before allocation, so stop at first NULL */
        for (i = 0; trace_data[i] != NULL; i++) {
                kfree(trace_data[i]);
                trace_data[i] = NULL;
        }
}
1305
1306void
1307debug_fini(void)
1308{
1309 atomic_notifier_chain_unregister(&panic_notifier_list,
1310 &spl_panic_notifier);
1311 trace_fini();
1312
1313 return;
1314}