/*
 * Source: module/spl/spl-debug.c (mirror of git.proxmox.com mirror_spl.git)
 */
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * This file was originally part of Lustre, http://www.lustre.org.
5 * but has subsequently been adapted for use in the SPL in
6 * accordance with the GPL.
7 *
8 * Copyright (C) 2004 Cluster File Systems, Inc.
9 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
10 * Produced at Lawrence Livermore National Laboratory
11 * Written by:
12 * Zach Brown <zab@clusterfs.com>
13 * Phil Schwan <phil@clusterfs.com>
14 * Brian Behlendorf <behlendorf1@llnl.gov>,
15 * Herb Wartens <wartens2@llnl.gov>,
16 * Jim Garlick <garlick@llnl.gov>
17 * UCRL-CODE-235197
18 *
19 * This is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This is distributed in the hope that it will be useful, but WITHOUT
25 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 * for more details.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
32 */
33
#include <linux/kmod.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <sys/sysmacros.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <spl-ctl.h>
48
/* Messages originating in this file are tagged with the S_DEBUG subsystem. */
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_DEBUG
54
55 unsigned long spl_debug_subsys = ~0;
56 EXPORT_SYMBOL(spl_debug_subsys);
57 module_param(spl_debug_subsys, long, 0644);
58 MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
59
60 unsigned long spl_debug_mask = (D_EMERG | D_ERROR | D_WARNING | D_CONSOLE);
61 EXPORT_SYMBOL(spl_debug_mask);
62 module_param(spl_debug_mask, long, 0644);
63 MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
64
65 unsigned long spl_debug_printk = D_CANTMASK;
66 EXPORT_SYMBOL(spl_debug_printk);
67 module_param(spl_debug_printk, long, 0644);
68 MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
69
70 int spl_debug_mb = -1;
71 EXPORT_SYMBOL(spl_debug_mb);
72 module_param(spl_debug_mb, int, 0644);
73 MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
74
75 unsigned int spl_debug_binary = 1;
76 EXPORT_SYMBOL(spl_debug_binary);
77
78 unsigned int spl_debug_catastrophe;
79 EXPORT_SYMBOL(spl_debug_catastrophe);
80
81 unsigned int spl_debug_panic_on_bug = 1;
82 EXPORT_SYMBOL(spl_debug_panic_on_bug);
83 module_param(spl_debug_panic_on_bug, int, 0644);
84 MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");
85
86 static char spl_debug_file_name[PATH_MAX];
87 char spl_debug_file_path[PATH_MAX] = "/var/dumps/spl-log";
88
89 unsigned int spl_console_ratelimit = 1;
90 EXPORT_SYMBOL(spl_console_ratelimit);
91
92 long spl_console_max_delay;
93 EXPORT_SYMBOL(spl_console_max_delay);
94
95 long spl_console_min_delay;
96 EXPORT_SYMBOL(spl_console_min_delay);
97
98 unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
99 EXPORT_SYMBOL(spl_console_backoff);
100
101 unsigned int spl_debug_stack;
102 EXPORT_SYMBOL(spl_debug_stack);
103
104 static int spl_panic_in_progress;
105
106 union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
107 char *trace_console_buffers[NR_CPUS][3];
108 struct rw_semaphore trace_sem;
109 atomic_t trace_tage_allocated = ATOMIC_INIT(0);
110
111 static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
112 static void trace_fini(void);
113
114
115 /* Memory percentage breakdown by type */
116 static unsigned int pages_factor[TCD_TYPE_MAX] = {
117 80, /* 80% pages for TCD_TYPE_PROC */
118 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
119 10 /* 10% pages for TCD_TYPE_IRQ */
120 };
121
122 const char *
123 spl_debug_subsys2str(int subsys)
124 {
125 switch (subsys) {
126 default:
127 return NULL;
128 case S_UNDEFINED:
129 return "undefined";
130 case S_ATOMIC:
131 return "atomic";
132 case S_KOBJ:
133 return "kobj";
134 case S_VNODE:
135 return "vnode";
136 case S_TIME:
137 return "time";
138 case S_RWLOCK:
139 return "rwlock";
140 case S_THREAD:
141 return "thread";
142 case S_CONDVAR:
143 return "condvar";
144 case S_MUTEX:
145 return "mutex";
146 case S_RNG:
147 return "rng";
148 case S_TASKQ:
149 return "taskq";
150 case S_KMEM:
151 return "kmem";
152 case S_DEBUG:
153 return "debug";
154 case S_GENERIC:
155 return "generic";
156 case S_PROC:
157 return "proc";
158 case S_MODULE:
159 return "module";
160 }
161 }
162
163 const char *
164 spl_debug_dbg2str(int debug)
165 {
166 switch (debug) {
167 default:
168 return NULL;
169 case D_TRACE:
170 return "trace";
171 case D_INFO:
172 return "info";
173 case D_WARNING:
174 return "warning";
175 case D_ERROR:
176 return "error";
177 case D_EMERG:
178 return "emerg";
179 case D_CONSOLE:
180 return "console";
181 case D_IOCTL:
182 return "ioctl";
183 case D_DPRINTF:
184 return "dprintf";
185 case D_OTHER:
186 return "other";
187 }
188 }
189
/*
 * Render a bitmask as human readable text in 'str' (at most 'size'
 * bytes, always NUL terminated when size > 0).  'is_subsys' selects
 * between subsystem (S_*) and severity (D_*) names.  Returns the
 * number of characters that WOULD be required (snprintf-style), which
 * may exceed 'size' — callers can use this to detect truncation.
 */
int
spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
{
	const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
		spl_debug_dbg2str;
	const char *token;
	int i, bit, len = 0;

	if (mask == 0) {			/* "0" */
		if (size > 0)
			str[0] = '0';
		len = 1;
	} else {				/* space-separated tokens */
		for (i = 0; i < 32; i++) {
			bit = 1 << i;

			if ((mask & bit) == 0)
				continue;

			token = fn(bit);
			if (token == NULL)	/* unused bit */
				continue;

			/* 'len' keeps counting past 'size'; writes are
			 * guarded so the buffer is never overrun. */
			if (len > 0) {		/* separator? */
				if (len < size)
					str[len] = ' ';
				len++;
			}

			while (*token != 0) {
				if (len < size)
					str[len] = *token;
				token++;
				len++;
			}
		}
	}

	/* terminate 'str' */
	if (len < size)
		str[len] = 0;
	else
		str[size - 1] = 0;

	return len;
}
236
/*
 * Translate a single textual token of length 'len' (not NUL
 * terminated) into its mask bit.  The comparison is case-insensitive
 * in one direction only: the built-in token names are all lowercase,
 * and uppercase characters in 'str' are folded to lowercase before
 * comparing.  On success *mask is set to the bit and 0 is returned;
 * -EINVAL is returned when no token matches.
 */
static int
spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
{
	const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
		spl_debug_dbg2str;
	const char *token;
	int i, j, bit;

	/* match against known tokens */
	for (i = 0; i < 32; i++) {
		bit = 1 << i;

		token = fn(bit);
		if (token == NULL)		/* unused? */
			continue;

		/* strcasecmp */
		for (j = 0; ; j++) {
			if (j == len) {		/* end of token */
				/* full match only if the name also ends here */
				if (token[j] == 0) {
					*mask = bit;
					return 0;
				}
				break;
			}

			if (token[j] == 0)	/* name shorter than input */
				break;

			if (str[j] == token[j])
				continue;

			/* fold an uppercase input character to lowercase */
			if (str[j] < 'A' || 'Z' < str[j])
				break;

			if (str[j] - 'A' + 'a' != token[j])
				break;
		}
	}

	return -EINVAL;				/* no match */
}
279
/*
 * Parse a user supplied mask description into *mask.  Accepts either
 * a bare number (any base sscanf's %i understands) or a whitespace
 * separated list of token names, optionally prefixed by '+' or '-'
 * operators.  A leading operator makes the expression relative to the
 * current *mask value; otherwise the result is absolute.  Returns 0
 * on success or -EINVAL on any malformed input.
 */
int
spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
{
	char op = 0;
	int m = 0, matched, n, t;

	/* Allow a number for backwards compatibility */
	for (n = strlen(str); n > 0; n--)
		if (!isspace(str[n-1]))
			break;
	matched = n;

	/* accept the numeric form only if it consumes the whole
	 * (rtrimmed) string */
	if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
		*mask = m;
		return 0;
	}

	/* <str> must be a list of debug tokens or numbers separated by
	 * whitespace and optionally an operator ('+' or '-'). If an operator
	 * appears first in <str>, '*mask' is used as the starting point
	 * (relative), otherwise 0 is used (absolute). An operator applies to
	 * all following tokens up to the next operator. */
	matched = 0;
	while (*str != 0) {
		while (isspace(*str))		/* skip whitespace */
			str++;

		if (*str == 0)
			break;

		if (*str == '+' || *str == '-') {
			op = *str++;

			/* op on first token == relative */
			if (!matched)
				m = *mask;

			while (isspace(*str))	/* skip whitespace */
				str++;

			if (*str == 0)		/* trailing op */
				return -EINVAL;
		}

		/* find token length */
		for (n = 0; str[n] != 0 && !isspace(str[n]); n++);

		/* match token */
		if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
			return -EINVAL;

		matched = 1;
		if (op == '-')
			m &= ~t;
		else
			m |= t;

		str += n;
	}

	if (!matched)
		return -EINVAL;

	*mask = m;
	return 0;
}
346
347 static void
348 spl_debug_dumplog_internal(dumplog_priv_t *dp)
349 {
350 void *journal_info;
351
352 journal_info = current->journal_info;
353 current->journal_info = NULL;
354
355 snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
356 "%s.%ld.%ld", spl_debug_file_path,
357 get_seconds(), (long)dp->dp_pid);
358 printk(KERN_ALERT "SPL: dumping log to %s\n", spl_debug_file_name);
359 spl_debug_dump_all_pages(dp, spl_debug_file_name);
360
361 current->journal_info = journal_info;
362 }
363
364 static int
365 spl_debug_dumplog_thread(void *arg)
366 {
367 dumplog_priv_t *dp = (dumplog_priv_t *)arg;
368
369 spl_debug_dumplog_internal(dp);
370 atomic_set(&dp->dp_done, 1);
371 wake_up(&dp->dp_waitq);
372 complete_and_exit(NULL, 0);
373
374 return 0; /* Unreachable */
375 }
376
377 /* When flag is set do not use a new thread for the debug dump */
378 int
379 spl_debug_dumplog(int flags)
380 {
381 struct task_struct *tsk;
382 dumplog_priv_t dp;
383
384 init_waitqueue_head(&dp.dp_waitq);
385 dp.dp_pid = current->pid;
386 dp.dp_flags = flags;
387 atomic_set(&dp.dp_done, 0);
388
389 if (dp.dp_flags & DL_NOTHREAD) {
390 spl_debug_dumplog_internal(&dp);
391 } else {
392
393 tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
394 if (tsk == NULL)
395 return -ENOMEM;
396
397 wake_up_process(tsk);
398 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
399 }
400
401 return 0;
402 }
403 EXPORT_SYMBOL(spl_debug_dumplog);
404
405 static char *
406 trace_get_console_buffer(void)
407 {
408 int cpu = get_cpu();
409 int idx;
410
411 if (in_irq()) {
412 idx = 0;
413 } else if (in_softirq()) {
414 idx = 1;
415 } else {
416 idx = 2;
417 }
418
419 return trace_console_buffers[cpu][idx];
420 }
421
/*
 * Release the buffer obtained from trace_get_console_buffer().  The
 * argument itself is unused; the call only re-enables preemption
 * (put_cpu) that the matching get disabled.
 */
static void
trace_put_console_buffer(char *buffer)
{
	put_cpu();
}
427
/*
 * Acquire the per-CPU trace descriptor lock, saving the interrupt
 * state in tcd->tcd_lock_flags.  Always succeeds (returns 1).
 */
static int
trace_lock_tcd(struct trace_cpu_data *tcd)
{
	__ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

	spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);

	return 1;
}

/* Release the lock taken by trace_lock_tcd(), restoring irq state. */
static void
trace_unlock_tcd(struct trace_cpu_data *tcd)
{
	__ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

	spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
}
445
446 static struct trace_cpu_data *
447 trace_get_tcd(void)
448 {
449 int cpu;
450 struct trace_cpu_data *tcd;
451
452 cpu = get_cpu();
453 if (in_irq())
454 tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
455 else if (in_softirq())
456 tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
457 else
458 tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
459
460 trace_lock_tcd(tcd);
461
462 return tcd;
463 }
464
/* Unlock the descriptor and re-enable preemption (pairs with get). */
static void
trace_put_tcd (struct trace_cpu_data *tcd)
{
	trace_unlock_tcd(tcd);

	put_cpu();
}
472
473 static void
474 trace_set_debug_header(struct spl_debug_header *header, int subsys,
475 int mask, const int line, unsigned long stack)
476 {
477 struct timeval tv;
478
479 do_gettimeofday(&tv);
480
481 header->ph_subsys = subsys;
482 header->ph_mask = mask;
483 header->ph_cpu_id = smp_processor_id();
484 header->ph_sec = (__u32)tv.tv_sec;
485 header->ph_usec = tv.tv_usec;
486 header->ph_stack = stack;
487 header->ph_pid = current->pid;
488 header->ph_line_num = line;
489
490 return;
491 }
492
493 static void
494 trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
495 int len, const char *file, const char *fn)
496 {
497 char *prefix = "SPL", *ptype = NULL;
498
499 if ((mask & D_EMERG) != 0) {
500 prefix = "SPLError";
501 ptype = KERN_EMERG;
502 } else if ((mask & D_ERROR) != 0) {
503 prefix = "SPLError";
504 ptype = KERN_ERR;
505 } else if ((mask & D_WARNING) != 0) {
506 prefix = "SPL";
507 ptype = KERN_WARNING;
508 } else if ((mask & (D_CONSOLE | spl_debug_printk)) != 0) {
509 prefix = "SPL";
510 ptype = KERN_INFO;
511 }
512
513 if ((mask & D_CONSOLE) != 0) {
514 printk("%s%s: %.*s", ptype, prefix, len, buf);
515 } else {
516 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
517 hdr->ph_pid, hdr->ph_stack, file,
518 hdr->ph_line_num, fn, len, buf);
519 }
520
521 return;
522 }
523
524 static int
525 trace_max_debug_mb(void)
526 {
527 return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
528 }
529
530 static struct trace_page *
531 tage_alloc(int gfp)
532 {
533 struct page *page;
534 struct trace_page *tage;
535
536 page = alloc_pages(gfp | __GFP_NOWARN, 0);
537 if (page == NULL)
538 return NULL;
539
540 tage = kmalloc(sizeof(*tage), gfp);
541 if (tage == NULL) {
542 __free_pages(page, 0);
543 return NULL;
544 }
545
546 tage->page = page;
547 atomic_inc(&trace_tage_allocated);
548
549 return tage;
550 }
551
/* Release a trace page and its descriptor; decrements the global count. */
static void
tage_free(struct trace_page *tage)
{
	__ASSERT(tage != NULL);
	__ASSERT(tage->page != NULL);

	__free_pages(tage->page, 0);
	kfree(tage);
	atomic_dec(&trace_tage_allocated);
}
562
/* Recover the trace_page embedding the given list node. */
static struct trace_page *
tage_from_list(struct list_head *list)
{
	return list_entry(list, struct trace_page, linkage);
}

/* Unlink 'tage' from its current list and append it to 'queue'. */
static void
tage_to_tail(struct trace_page *tage, struct list_head *queue)
{
	__ASSERT(tage != NULL);
	__ASSERT(queue != NULL);

	list_move_tail(&tage->linkage, queue);
}
577
/* try to return a page that has 'len' bytes left at the end */
/*
 * Three strategies, in order: reuse the newest page if it still has
 * room, pull a page from the pre-allocated stock, or allocate a fresh
 * page (GFP_ATOMIC — may be called from irq context) while under the
 * tcd_max_pages budget.  Returns NULL when the budget is exhausted or
 * allocation fails.
 */
static struct trace_page *
trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
{
	struct trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__ASSERT(!list_empty(&tcd->tcd_pages));
		/* newest page is at the tail of tcd_pages */
		tage = tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = tage_from_list(tcd->tcd_stock_pages.prev);
			tcd->tcd_cur_stock_pages--;
			list_del_init(&tage->linkage);
		} else {
			tage = tage_alloc(GFP_ATOMIC);
			if (tage == NULL) {
				printk(KERN_WARNING
				       "failure to allocate a tage (%ld)\n",
				       tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		return tage;
	}

	return NULL;
}
617
/* return a page that has 'len' bytes left at the end */
/*
 * Like trace_get_tage_try(), but when the budget is full the OLDEST
 * page (head of tcd_pages) is recycled: its contents are discarded
 * (used = 0) and it is moved to the tail.  Can still return NULL if
 * the tcd owns no pages at all.
 */
static struct trace_page *
trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
{
	struct trace_page *tage;

	__ASSERT(len <= PAGE_SIZE);

	tage = trace_get_tage_try(tcd, len);
	if (tage)
		return tage;

	if (tcd->tcd_cur_pages > 0) {
		tage = tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		tage_to_tail(tage, &tcd->tcd_pages);
	}

	return tage;
}
638
/*
 * Core debug message routine.  Formats format1 (with 'args') followed
 * by format2 (with the trailing varargs) into the current CPU's trace
 * buffer, then optionally echoes the message to the console when its
 * mask intersects spl_debug_printk, applying per-callsite rate
 * limiting through 'cdls' when provided.  Returns 1 when console
 * output was suppressed, 0 otherwise.
 */
int
spl_debug_vmsg(spl_debug_limit_state_t *cdls, int subsys, int mask,
               const char *file, const char *fn, const int line,
               const char *format1, va_list args, const char *format2, ...)
{
	struct trace_cpu_data *tcd = NULL;
	struct spl_debug_header header = { 0, };
	struct trace_page *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char *string_buf = NULL;
	char *debug_buf;
	int known_size;
	int needed = 85;	/* average message length */
	int max_nob;
	va_list ap;
	int i;
	int remain;

	/* reduce 'file' to its basename */
	if (strchr(file, '/'))
		file = strrchr(file, '/') + 1;

	trace_set_debug_header(&header, subsys, mask, line, CDEBUG_STACK());

	tcd = trace_get_tcd();
	if (tcd == NULL)
		goto console;

	if (tcd->tcd_shutting_down) {
		trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	/* fixed-size part of the record: basename, optional function
	 * name (both NUL terminated) and optional binary header */
	known_size = strlen(file) + 1;
	if (fn)
		known_size += strlen(fn) + 1;

	if (spl_debug_binary)
		known_size += sizeof(header);

	/* '2' used because vsnprintf returns real size required for output
	 * _without_ terminating NULL. */
	for (i = 0; i < 2; i++) {
		/* first pass uses the estimated length; if it does not
		 * fit, the second pass retries with the exact 'needed' */
		tage = trace_get_tage(tcd, needed + known_size + 1);
		if (tage == NULL) {
			if (needed + known_size > PAGE_SIZE)
				mask |= D_ERROR;

			trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		string_buf = (char *)page_address(tage->page) +
			tage->used + known_size;

		max_nob = PAGE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
			mask |= D_ERROR;
			trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, max_nob, format1, ap);
			va_end(ap);
		}

		if (format2) {
			remain = max_nob - needed;
			if (remain < 0)
				remain = 0;

			va_start(ap, format2);
			needed += vsnprintf(string_buf+needed, remain, format2, ap);
			va_end(ap);
		}

		if (needed < max_nob)
			break;
	}

	if (unlikely(*(string_buf + needed - 1) != '\n'))
		printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
		       file, line, fn);

	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	/* lay down the record: [header] file '\0' [fn '\0'] message */
	if (spl_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (fn) {
		strcpy(debug_buf, fn);
		tage->used += strlen(fn) + 1;
		debug_buf += strlen(fn) + 1;
	}

	__ASSERT(debug_buf == string_buf);

	tage->used += needed;
	__ASSERT (tage->used <= PAGE_SIZE);

 console:
	if ((mask & spl_debug_printk) == 0) {
		/* no console output requested */
		if (tcd != NULL)
			trace_put_tcd(tcd);
		return 1;
	}

	if (cdls != NULL) {
		if (spl_console_ratelimit && cdls->cdls_next != 0 &&
		    !time_before(cdls->cdls_next, jiffies)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd != NULL)
				trace_put_tcd(tcd);
			return 1;
		}

		if (time_before(cdls->cdls_next + spl_console_max_delay +
		                (10 * HZ), jiffies)) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= spl_console_backoff * 4;
		} else {
			cdls->cdls_delay *= spl_console_backoff;

			if (cdls->cdls_delay < spl_console_min_delay)
				cdls->cdls_delay = spl_console_min_delay;
			else if (cdls->cdls_delay > spl_console_max_delay)
				cdls->cdls_delay = spl_console_max_delay;
		}

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
	}

	if (tcd != NULL) {
		/* message already formatted in the trace page */
		trace_print_to_console(&header, mask, string_buf, needed, file, fn);
		trace_put_tcd(tcd);
	} else {
		/* no trace page available: re-format into the per-CPU
		 * console buffer */
		string_buf = trace_get_console_buffer();

		needed = 0;
		if (format1 != NULL) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
			va_end(ap);
		}
		if (format2 != NULL) {
			remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
			if (remain > 0) {
				va_start(ap, format2);
				needed += vsnprintf(string_buf+needed, remain, format2, ap);
				va_end(ap);
			}
		}
		trace_print_to_console(&header, mask,
				       string_buf, needed, file, fn);

		trace_put_console_buffer(string_buf);
	}

	/* report how many similar messages the rate limiter swallowed */
	if (cdls != NULL && cdls->cdls_count != 0) {
		string_buf = trace_get_console_buffer();

		needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
		                  "Skipped %d previous similar message%s\n",
		                  cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");

		trace_print_to_console(&header, mask,
				       string_buf, needed, file, fn);

		trace_put_console_buffer(string_buf);
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(spl_debug_vmsg);
831
/* Do the collect_pages job on a single CPU: assumes that all other
 * CPUs have been stopped during a panic. If this isn't true for
 * some arch, this will have to be implemented separately in each arch.
 */
static void
collect_pages_from_single_cpu(struct page_collection *pc)
{
	struct trace_cpu_data *tcd;
	int i, j;

	/* note: walks every tcd without taking tcd locks — only safe
	 * because other CPUs are assumed stopped (see comment above) */
	tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;
	}
}
847
/*
 * Move every CPU's trace pages onto pc->pc_pages, taking each tcd's
 * lock (via tcd_for_each_type_lock) under the collection spinlock.
 */
static void
collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct trace_cpu_data *tcd;
	int i, cpu;

	spin_lock(&pc->pc_lock);
	for_each_possible_cpu(cpu) {
		tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;
		}
	}
	spin_unlock(&pc->pc_lock);
}
863
864 static void
865 collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
866 {
867 INIT_LIST_HEAD(&pc->pc_pages);
868
869 if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
870 collect_pages_from_single_cpu(pc);
871 else
872 collect_pages_on_all_cpus(pc);
873 }
874
/*
 * Return collected pages to their owning tcds after a failed dump.
 * Each page is matched back to its original (cpu, type) descriptor
 * and re-inserted at that descriptor's former list head.
 */
static void
put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct trace_page *tage;
	struct trace_page *tmp;
	int i, cpu;

	spin_lock(&pc->pc_lock);

	for_each_possible_cpu(cpu) {
		tcd_for_each_type_lock(tcd, i, cpu) {
			/* remember the pre-existing head so returned
			 * pages land before any newly logged ones */
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
			                         linkage) {

				__ASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}

	spin_unlock(&pc->pc_lock);
}
906
907 static void
908 put_pages_back(struct page_collection *pc)
909 {
910 if (!spl_panic_in_progress)
911 put_pages_back_on_all_cpus(pc);
912 }
913
914 static struct file *
915 trace_filp_open (const char *name, int flags, int mode, int *err)
916 {
917 struct file *filp = NULL;
918 int rc;
919
920 filp = filp_open(name, flags, mode);
921 if (IS_ERR(filp)) {
922 rc = PTR_ERR(filp);
923 printk(KERN_ERR "SPL: Can't open %s file: %d\n", name, rc);
924 if (err)
925 *err = rc;
926 filp = NULL;
927 }
928 return filp;
929 }
930
/* Thin wrappers over the VFS file_operations used when dumping logs. */
#define trace_filp_write(fp, b, s, p) (fp)->f_op->write((fp), (b), (s), p)
#define trace_filp_fsync(fp) (fp)->f_op->fsync((fp),(fp)->f_dentry,1)
#define trace_filp_close(f) filp_close(f, NULL)
#define trace_filp_poff(f) (&(f)->f_pos)
935
/*
 * Collect every trace page and write them to 'filename' (created
 * O_EXCL).  On a short write the remaining pages are returned to
 * their tcds; successfully written pages are freed.  Serialized
 * against resize/teardown by trace_sem.  Returns 0 or a negative
 * errno.
 */
static int
spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
{
	struct page_collection pc;
	struct file *filp;
	struct trace_page *tage;
	struct trace_page *tmp;
	mm_segment_t oldfs;
	int rc = 0;

	down_write(&trace_sem);

	filp = trace_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
	                       0600, &rc);
	if (filp == NULL) {
		/* O_EXCL collision means another dump already exists;
		 * stay quiet in that case */
		if (rc != -EEXIST)
			printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
			       filename, rc);
		goto out;
	}

	spin_lock_init(&pc.pc_lock);
	collect_pages(dp, &pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}

	/* allow trace_filp_write() to take kernel-space buffers */
	oldfs = get_fs();
	set_fs(get_ds());

	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		__ASSERT_TAGE_INVARIANT(tage);

		rc = trace_filp_write(filp, page_address(tage->page),
		                      tage->used, trace_filp_poff(filp));
		if (rc != (int)tage->used) {
			printk(KERN_WARNING "SPL: Wanted to write %u "
			       "but wrote %d\n", tage->used, rc);
			put_pages_back(&pc);
			__ASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		tage_free(tage);
	}

	set_fs(oldfs);

	rc = trace_filp_fsync(filp);
	if (rc)
		printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
 close:
	trace_filp_close(filp);
 out:
	up_write(&trace_sem);

	return rc;
}
995
996 static void
997 spl_debug_flush_pages(void)
998 {
999 dumplog_priv_t dp;
1000 struct page_collection pc;
1001 struct trace_page *tage;
1002 struct trace_page *tmp;
1003
1004 spin_lock_init(&pc.pc_lock);
1005 init_waitqueue_head(&dp.dp_waitq);
1006 dp.dp_pid = current->pid;
1007 dp.dp_flags = 0;
1008 atomic_set(&dp.dp_done, 0);
1009
1010 collect_pages(&dp, &pc);
1011 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1012 __ASSERT_TAGE_INVARIANT(tage);
1013 list_del(&tage->linkage);
1014 tage_free(tage);
1015 }
1016 }
1017
1018 unsigned long
1019 spl_debug_set_mask(unsigned long mask) {
1020 spl_debug_mask = mask;
1021 return 0;
1022 }
1023 EXPORT_SYMBOL(spl_debug_set_mask);
1024
1025 unsigned long
1026 spl_debug_get_mask(void) {
1027 return spl_debug_mask;
1028 }
1029 EXPORT_SYMBOL(spl_debug_get_mask);
1030
1031 unsigned long
1032 spl_debug_set_subsys(unsigned long subsys) {
1033 spl_debug_subsys = subsys;
1034 return 0;
1035 }
1036 EXPORT_SYMBOL(spl_debug_set_subsys);
1037
1038 unsigned long
1039 spl_debug_get_subsys(void) {
1040 return spl_debug_subsys;
1041 }
1042 EXPORT_SYMBOL(spl_debug_get_subsys);
1043
/*
 * Resize the trace buffers to 'mb' megabytes total, split across CPUs
 * and then across record types by pages_factor[].  Rejects sizes below
 * one MB per possible CPU or above trace_max_debug_mb().  Returns 0 or
 * -EINVAL.  NOTE(review): a negative 'mb' compared against the
 * unsigned num_possible_cpus() promotes to unsigned — confirm callers
 * never pass negative values here.
 */
int
spl_debug_set_mb(int mb)
{
	int i, j, pages;
	int limit = trace_max_debug_mb();
	struct trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
		       "%dMB - lower limit is %d\n", mb, num_possible_cpus());
		return -EINVAL;
	}

	if (mb > limit) {
		printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
		       "%dMB - upper limit is %d\n", mb, limit);
		return -EINVAL;
	}

	/* per-CPU share, converted from MB to pages */
	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_SHIFT);

	down_write(&trace_sem);

	tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	up_write(&trace_sem);

	return 0;
}
EXPORT_SYMBOL(spl_debug_set_mb);
1076
1077 int
1078 spl_debug_get_mb(void)
1079 {
1080 int i, j;
1081 struct trace_cpu_data *tcd;
1082 int total_pages = 0;
1083
1084 down_read(&trace_sem);
1085
1086 tcd_for_each(tcd, i, j)
1087 total_pages += tcd->tcd_max_pages;
1088
1089 up_read(&trace_sem);
1090
1091 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1092 }
1093 EXPORT_SYMBOL(spl_debug_get_mb);
1094
1095 void spl_debug_dumpstack(struct task_struct *tsk)
1096 {
1097 extern void show_task(struct task_struct *);
1098
1099 if (tsk == NULL)
1100 tsk = current;
1101
1102 printk(KERN_ERR "SPL: Showing stack for process %d\n", tsk->pid);
1103 dump_stack();
1104 }
1105 EXPORT_SYMBOL(spl_debug_dumpstack);
1106
/*
 * Handle an SBUG assertion failure: record the event, dump the stack
 * and trace log, then either panic (spl_debug_panic_on_bug) or park
 * the task forever.  In interrupt context we must panic immediately —
 * the log dump below may sleep.
 */
void spl_debug_bug(char *file, const char *func, const int line, int flags)
{
	spl_debug_catastrophe = 1;
	spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SBUG\n");

	if (in_interrupt()) {
		panic("SBUG in interrupt.\n");
		/* not reached */
	}

	/* Ensure all debug pages are dumped by the current cpu */
	if (spl_debug_panic_on_bug)
		spl_panic_in_progress = 1;

	spl_debug_dumpstack(NULL);
	spl_debug_dumplog(flags);

	if (spl_debug_panic_on_bug)
		panic("SBUG");

	/* non-panic mode: wedge this task permanently */
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	while (1)
		schedule();
}
EXPORT_SYMBOL(spl_debug_bug);
1132
/* Drop every buffered trace record.  Always returns 0. */
int
spl_debug_clear_buffer(void)
{
	spl_debug_flush_pages();

	return 0;
}
EXPORT_SYMBOL(spl_debug_clear_buffer);
1140
/*
 * Insert a visually distinctive marker message into the trace log so
 * a point in time can be located when reading a dump.  Always returns 0.
 */
int
spl_debug_mark_buffer(char *text)
{
	CDEBUG(D_WARNING, "*************************************\n");
	CDEBUG(D_WARNING, "DEBUG MARKER: %s\n", text);
	CDEBUG(D_WARNING, "*************************************\n");

	return 0;
}
EXPORT_SYMBOL(spl_debug_mark_buffer);
1151
/*
 * Allocate and initialize the tracing machinery: the per-type
 * trace_data arrays, every per-CPU descriptor (budgeted to
 * 'max_pages' split by pages_factor[]) and the three per-CPU console
 * buffers.  On any allocation failure everything already allocated is
 * torn down via trace_fini() and -ENOMEM is returned.
 */
static int
trace_init(int max_pages)
{
	struct trace_cpu_data *tcd;
	int i, j;

	init_rwsem(&trace_sem);

	/* initialize trace_data */
	memset(trace_data, 0, sizeof(trace_data));
	for (i = 0; i < TCD_TYPE_MAX; i++) {
		/* sized for NR_CPUS so (*trace_data[i])[cpu] is valid
		 * for any possible cpu */
		trace_data[i] = kmalloc(sizeof(union trace_data_union) *
		                        NR_CPUS, GFP_KERNEL);
		if (trace_data[i] == NULL)
			goto out;
	}

	tcd_for_each(tcd, i, j) {
		spin_lock_init(&tcd->tcd_lock);
		tcd->tcd_pages_factor = pages_factor[i];
		tcd->tcd_type = i;
		tcd->tcd_cpu = j;
		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
		tcd->tcd_shutting_down = 0;
	}

	/* one console buffer per (cpu, context) — see
	 * trace_get_console_buffer() for the three contexts */
	for (i = 0; i < num_possible_cpus(); i++) {
		for (j = 0; j < 3; j++) {
			trace_console_buffers[i][j] =
				kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
				        GFP_KERNEL);

			if (trace_console_buffers[i][j] == NULL)
				goto out;
		}
	}

	return 0;
 out:
	trace_fini();
	printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
	return -ENOMEM;
}
1199
1200 int
1201 debug_init(void)
1202 {
1203 int rc, max = spl_debug_mb;
1204
1205 spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
1206 spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
1207
1208 /* If spl_debug_mb is set to an invalid value or uninitialized
1209 * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
1210 if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
1211 max >= 512 || max < 0) {
1212 max = TCD_MAX_PAGES;
1213 } else {
1214 max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
1215 }
1216
1217 rc = trace_init(max);
1218 if (rc)
1219 return rc;
1220
1221 return rc;
1222 }
1223
/*
 * Mark every tcd as shutting down (so spl_debug_vmsg stops using it)
 * and free all of its buffered pages, with each tcd's lock held via
 * tcd_for_each_type_lock.
 */
static void
trace_cleanup_on_all_cpus(void)
{
	struct trace_cpu_data *tcd;
	struct trace_page *tage;
	struct trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		tcd_for_each_type_lock(tcd, i, cpu) {
			tcd->tcd_shutting_down = 1;

			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
			                         linkage) {
				__ASSERT_TAGE_INVARIANT(tage);

				list_del(&tage->linkage);
				tage_free(tage);
			}
			tcd->tcd_cur_pages = 0;
		}
	}
}
1247
/*
 * Tear down everything trace_init() built: drain all tcds, then free
 * the console buffers and the per-type trace_data arrays.  trace_data
 * is filled sequentially and zeroed first, so stopping the final loop
 * at the first NULL entry frees every allocated element.  Safe to
 * call on a partially-initialized state (used as trace_init()'s error
 * path).
 */
static void
trace_fini(void)
{
	int i, j;

	trace_cleanup_on_all_cpus();

	for (i = 0; i < num_possible_cpus(); i++) {
		for (j = 0; j < 3; j++) {
			if (trace_console_buffers[i][j] != NULL) {
				kfree(trace_console_buffers[i][j]);
				trace_console_buffers[i][j] = NULL;
			}
		}
	}

	for (i = 0; trace_data[i] != NULL; i++) {
		kfree(trace_data[i]);
		trace_data[i] = NULL;
	}
}
1269
/* Module-unload entry point: release all tracing resources. */
void
debug_fini(void)
{
	trace_fini();
}