/*
 * Imported from the Proxmox mirror of spl.git: module/spl/spl-debug.c
 * (commit: "Prepend spl_ to all init/fini functions").
 */
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Debug Implementation.
25 \*****************************************************************************/
26
27 #include <linux/kmod.h>
28 #include <linux/mm.h>
29 #include <linux/vmalloc.h>
30 #include <linux/pagemap.h>
31 #include <linux/slab.h>
32 #include <linux/ctype.h>
33 #include <linux/kthread.h>
34 #include <linux/hardirq.h>
35 #include <linux/interrupt.h>
36 #include <linux/spinlock.h>
37 #include <linux/proc_compat.h>
38 #include <linux/file_compat.h>
39 #include <sys/sysmacros.h>
40 #include <spl-debug.h>
41 #include <spl-trace.h>
42 #include <spl-ctl.h>
43
44 #ifdef SS_DEBUG_SUBSYS
45 #undef SS_DEBUG_SUBSYS
46 #endif
47
48 #define SS_DEBUG_SUBSYS SS_DEBUG
49
50 unsigned long spl_debug_subsys = ~0;
51 EXPORT_SYMBOL(spl_debug_subsys);
52 module_param(spl_debug_subsys, ulong, 0644);
53 MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
54
55 unsigned long spl_debug_mask = SD_CANTMASK;
56 EXPORT_SYMBOL(spl_debug_mask);
57 module_param(spl_debug_mask, ulong, 0644);
58 MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
59
60 unsigned long spl_debug_printk = SD_CANTMASK;
61 EXPORT_SYMBOL(spl_debug_printk);
62 module_param(spl_debug_printk, ulong, 0644);
63 MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
64
65 int spl_debug_mb = -1;
66 EXPORT_SYMBOL(spl_debug_mb);
67 module_param(spl_debug_mb, int, 0644);
68 MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
69
70 unsigned int spl_debug_binary = 1;
71 EXPORT_SYMBOL(spl_debug_binary);
72
73 unsigned int spl_debug_catastrophe;
74 EXPORT_SYMBOL(spl_debug_catastrophe);
75
76 unsigned int spl_debug_panic_on_bug = 0;
77 EXPORT_SYMBOL(spl_debug_panic_on_bug);
78 module_param(spl_debug_panic_on_bug, uint, 0644);
79 MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");
80
81 static char spl_debug_file_name[PATH_MAX];
82 char spl_debug_file_path[PATH_MAX] = "/tmp/spl-log";
83
84 unsigned int spl_console_ratelimit = 1;
85 EXPORT_SYMBOL(spl_console_ratelimit);
86
87 long spl_console_max_delay;
88 EXPORT_SYMBOL(spl_console_max_delay);
89
90 long spl_console_min_delay;
91 EXPORT_SYMBOL(spl_console_min_delay);
92
93 unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
94 EXPORT_SYMBOL(spl_console_backoff);
95
96 unsigned int spl_debug_stack;
97 EXPORT_SYMBOL(spl_debug_stack);
98
99 static int spl_panic_in_progress;
100
101 union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
102 char *trace_console_buffers[NR_CPUS][3];
103 struct rw_semaphore trace_sem;
104 atomic_t trace_tage_allocated = ATOMIC_INIT(0);
105
106 static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
107 static void trace_fini(void);
108
109
110 /* Memory percentage breakdown by type */
111 static unsigned int pages_factor[TCD_TYPE_MAX] = {
112 80, /* 80% pages for TCD_TYPE_PROC */
113 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
114 10 /* 10% pages for TCD_TYPE_IRQ */
115 };
116
/*
 * Map a single subsystem mask bit (SS_*) to its lower-case token name,
 * or NULL when the bit is not assigned.  Inverse of the token matching
 * performed by spl_debug_token2mask().
 */
const char *
spl_debug_subsys2str(int subsys)
{
        switch (subsys) {
        default:
                return NULL;
        case SS_UNDEFINED:
                return "undefined";
        case SS_ATOMIC:
                return "atomic";
        case SS_KOBJ:
                return "kobj";
        case SS_VNODE:
                return "vnode";
        case SS_TIME:
                return "time";
        case SS_RWLOCK:
                return "rwlock";
        case SS_THREAD:
                return "thread";
        case SS_CONDVAR:
                return "condvar";
        case SS_MUTEX:
                return "mutex";
        case SS_RNG:
                return "rng";
        case SS_TASKQ:
                return "taskq";
        case SS_KMEM:
                return "kmem";
        case SS_DEBUG:
                return "debug";
        case SS_GENERIC:
                return "generic";
        case SS_PROC:
                return "proc";
        case SS_MODULE:
                return "module";
        case SS_CRED:
                return "cred";
        case SS_KSTAT:
                return "kstat";
        case SS_XDR:
                return "xdr";
        case SS_TSD:
                return "tsd";
        case SS_ZLIB:
                return "zlib";
        case SS_USER1:
                return "user1";
        case SS_USER2:
                return "user2";
        case SS_USER3:
                return "user3";
        case SS_USER4:
                return "user4";
        case SS_USER5:
                return "user5";
        case SS_USER6:
                return "user6";
        case SS_USER7:
                return "user7";
        case SS_USER8:
                return "user8";
        }
}
183
/*
 * Map a single debug severity mask bit (SD_*) to its lower-case token
 * name, or NULL when the bit is not assigned.
 */
const char *
spl_debug_dbg2str(int debug)
{
        switch (debug) {
        default:
                return NULL;
        case SD_TRACE:
                return "trace";
        case SD_INFO:
                return "info";
        case SD_WARNING:
                return "warning";
        case SD_ERROR:
                return "error";
        case SD_EMERG:
                return "emerg";
        case SD_CONSOLE:
                return "console";
        case SD_IOCTL:
                return "ioctl";
        case SD_DPRINTF:
                return "dprintf";
        case SD_OTHER:
                return "other";
        }
}
210
/*
 * Decode 'mask' into 'str' as a list of space-separated token names
 * ("0" when no bits are set), using the subsystem decoder when
 * 'is_subsys' is set and the severity decoder otherwise.  At most
 * 'size' bytes are written and the result is always NUL terminated.
 * Returns the length the full expansion requires, which may exceed
 * 'size' when the output was truncated (snprintf-style semantics).
 */
int
spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
{
        /* per-bit name lookup for the requested mask flavor */
        const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
                                                 spl_debug_dbg2str;
        const char *token;
        int i, bit, len = 0;

        if (mask == 0) {                        /* "0" */
                if (size > 0)
                        str[0] = '0';
                len = 1;
        } else {                                /* space-separated tokens */
                for (i = 0; i < 32; i++) {
                        bit = 1 << i;

                        if ((mask & bit) == 0)
                                continue;

                        token = fn(bit);
                        if (token == NULL)      /* unused bit */
                                continue;

                        if (len > 0) {          /* separator? */
                                if (len < size)
                                        str[len] = ' ';
                                len++;
                        }

                        /* copy the token; keep counting past 'size' so
                         * the required length is still reported */
                        while (*token != 0) {
                                if (len < size)
                                        str[len] = *token;
                                token++;
                                len++;
                        }
                }
        }

        /* terminate 'str' */
        if (len < size)
                str[len] = 0;
        else
                str[size - 1] = 0;

        return len;
}
257
/*
 * Match a single token of exactly 'len' bytes (not necessarily NUL
 * terminated) case-insensitively against every known token name and
 * store the corresponding bit in '*mask'.  Returns 0 on a match or
 * -EINVAL when no token name matches.
 */
static int
spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
{
        const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
                                                 spl_debug_dbg2str;
        const char *token;
        int i, j, bit;

        /* match against known tokens */
        for (i = 0; i < 32; i++) {
                bit = 1 << i;

                token = fn(bit);
                if (token == NULL)              /* unused? */
                        continue;

                /* strcasecmp */
                for (j = 0; ; j++) {
                        if (j == len) {         /* end of token */
                                /* match only if the name also ends here */
                                if (token[j] == 0) {
                                        *mask = bit;
                                        return 0;
                                }
                                break;
                        }

                        if (token[j] == 0)
                                break;

                        if (str[j] == token[j])
                                continue;

                        /* token names are lower case; fold an upper-case
                         * input character before comparing */
                        if (str[j] < 'A' || 'Z' < str[j])
                                break;

                        if (str[j] - 'A' + 'a' != token[j])
                                break;
                }
        }

        return -EINVAL;                         /* no match */
}
300
/*
 * Parse a user-supplied mask description into '*mask'.  Accepts either
 * a plain number (backwards compatible) or a whitespace-separated list
 * of token names, optionally prefixed by '+' or '-' operators.  When
 * the string starts with an operator the existing '*mask' value is the
 * starting point (relative update), otherwise the mask is rebuilt from
 * zero.  Returns 0 on success or -EINVAL on any parse error.
 */
int
spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
{
        char op = 0;
        int m = 0, matched, n, t;

        /* Allow a number for backwards compatibility */
        for (n = strlen(str); n > 0; n--)
                if (!isspace(str[n-1]))
                        break;
        matched = n;

        /* accept the numeric form only if it consumes the whole
         * (right-trimmed) string */
        if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
                *mask = m;
                return 0;
        }

        /* <str> must be a list of debug tokens or numbers separated by
         * whitespace and optionally an operator ('+' or '-'). If an operator
         * appears first in <str>, '*mask' is used as the starting point
         * (relative), otherwise 0 is used (absolute). An operator applies to
         * all following tokens up to the next operator. */
        matched = 0;
        while (*str != 0) {
                while (isspace(*str)) /* skip whitespace */
                        str++;

                if (*str == 0)
                        break;

                if (*str == '+' || *str == '-') {
                        op = *str++;

                        /* op on first token == relative */
                        if (!matched)
                                m = *mask;

                        while (isspace(*str)) /* skip whitespace */
                                str++;

                        if (*str == 0)          /* trailing op */
                                return -EINVAL;
                }

                /* find token length */
                for (n = 0; str[n] != 0 && !isspace(str[n]); n++);

                /* match token */
                if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
                        return -EINVAL;

                matched = 1;
                if (op == '-')
                        m &= ~t;
                else
                        m |= t;

                str += n;
        }

        if (!matched)
                return -EINVAL;

        *mask = m;
        return 0;
}
367
368 static void
369 spl_debug_dumplog_internal(dumplog_priv_t *dp)
370 {
371 void *journal_info;
372
373 journal_info = current->journal_info;
374 current->journal_info = NULL;
375
376 snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
377 "%s.%ld.%ld", spl_debug_file_path,
378 get_seconds(), (long)dp->dp_pid);
379 printk("SPL: Dumping log to %s\n", spl_debug_file_name);
380 spl_debug_dump_all_pages(dp, spl_debug_file_name);
381
382 current->journal_info = journal_info;
383 }
384
/*
 * Kthread body for the asynchronous log dump: perform the dump, mark
 * completion for the waiter in spl_debug_dumplog(), and exit.  The
 * dumplog_priv_t lives on the waiter's stack, so it must not be
 * touched after dp_done is set and the waiter has been woken.
 */
static int
spl_debug_dumplog_thread(void *arg)
{
        dumplog_priv_t *dp = (dumplog_priv_t *)arg;

        spl_debug_dumplog_internal(dp);
        atomic_set(&dp->dp_done, 1);
        wake_up(&dp->dp_waitq);
        complete_and_exit(NULL, 0);

        return 0; /* Unreachable */
}
397
398 /* When flag is set do not use a new thread for the debug dump */
399 int
400 spl_debug_dumplog(int flags)
401 {
402 struct task_struct *tsk;
403 dumplog_priv_t dp;
404
405 init_waitqueue_head(&dp.dp_waitq);
406 dp.dp_pid = current->pid;
407 dp.dp_flags = flags;
408 atomic_set(&dp.dp_done, 0);
409
410 if (dp.dp_flags & DL_NOTHREAD) {
411 spl_debug_dumplog_internal(&dp);
412 } else {
413
414 tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
415 if (tsk == NULL)
416 return -ENOMEM;
417
418 wake_up_process(tsk);
419 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
420 }
421
422 return 0;
423 }
424 EXPORT_SYMBOL(spl_debug_dumplog);
425
426 static char *
427 trace_get_console_buffer(void)
428 {
429 int cpu = get_cpu();
430 int idx;
431
432 if (in_irq()) {
433 idx = 0;
434 } else if (in_softirq()) {
435 idx = 1;
436 } else {
437 idx = 2;
438 }
439
440 return trace_console_buffers[cpu][idx];
441 }
442
/*
 * Release the buffer obtained from trace_get_console_buffer().  The
 * buffer itself is static per-cpu state; only the CPU pin taken by
 * get_cpu() needs to be dropped.
 */
static void
trace_put_console_buffer(char *buffer)
{
        put_cpu();
}
448
/*
 * Lock a per-cpu trace data structure, disabling local interrupts.
 * The saved irq flags are stashed in the tcd itself for the matching
 * trace_unlock_tcd().  Always returns 1 (locked).
 */
static int
trace_lock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);

        return 1;
}
458
/*
 * Unlock a per-cpu trace data structure, restoring the irq flags
 * saved by trace_lock_tcd().
 */
static void
trace_unlock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
}
466
467 static struct trace_cpu_data *
468 trace_get_tcd(void)
469 {
470 int cpu;
471 struct trace_cpu_data *tcd;
472
473 cpu = get_cpu();
474 if (in_irq())
475 tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
476 else if (in_softirq())
477 tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
478 else
479 tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
480
481 trace_lock_tcd(tcd);
482
483 return tcd;
484 }
485
/*
 * Release a tcd obtained from trace_get_tcd(): drop its lock and
 * unpin the CPU.
 */
static void
trace_put_tcd (struct trace_cpu_data *tcd)
{
        trace_unlock_tcd(tcd);

        put_cpu();
}
493
494 static void
495 trace_set_debug_header(struct spl_debug_header *header, int subsys,
496 int mask, const int line, unsigned long stack)
497 {
498 struct timeval tv;
499
500 do_gettimeofday(&tv);
501
502 header->ph_subsys = subsys;
503 header->ph_mask = mask;
504 header->ph_cpu_id = smp_processor_id();
505 header->ph_sec = (__u32)tv.tv_sec;
506 header->ph_usec = tv.tv_usec;
507 header->ph_stack = stack;
508 header->ph_pid = current->pid;
509 header->ph_line_num = line;
510
511 return;
512 }
513
514 static void
515 trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
516 int len, const char *file, const char *fn)
517 {
518 char *prefix = "SPL", *ptype = NULL;
519
520 if ((mask & SD_EMERG) != 0) {
521 prefix = "SPLError";
522 ptype = KERN_EMERG;
523 } else if ((mask & SD_ERROR) != 0) {
524 prefix = "SPLError";
525 ptype = KERN_ERR;
526 } else if ((mask & SD_WARNING) != 0) {
527 prefix = "SPL";
528 ptype = KERN_WARNING;
529 } else if ((mask & (SD_CONSOLE | spl_debug_printk)) != 0) {
530 prefix = "SPL";
531 ptype = KERN_INFO;
532 }
533
534 if ((mask & SD_CONSOLE) != 0) {
535 printk("%s%s: %.*s", ptype, prefix, len, buf);
536 } else {
537 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
538 hdr->ph_pid, hdr->ph_stack, file,
539 hdr->ph_line_num, fn, len, buf);
540 }
541
542 return;
543 }
544
/*
 * Upper bound for the total debug buffer size in megabytes: 80% of
 * physical memory, but never less than 512 MB.
 */
static int
trace_max_debug_mb(void)
{
        return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
}
550
551 static struct trace_page *
552 tage_alloc(int gfp)
553 {
554 struct page *page;
555 struct trace_page *tage;
556
557 page = alloc_pages(gfp | __GFP_NOWARN, 0);
558 if (page == NULL)
559 return NULL;
560
561 tage = kmalloc(sizeof(*tage), gfp);
562 if (tage == NULL) {
563 __free_pages(page, 0);
564 return NULL;
565 }
566
567 tage->page = page;
568 atomic_inc(&trace_tage_allocated);
569
570 return tage;
571 }
572
/*
 * Free a trace page descriptor and its backing page, decrementing the
 * global allocation counter.
 */
static void
tage_free(struct trace_page *tage)
{
        __ASSERT(tage != NULL);
        __ASSERT(tage->page != NULL);

        __free_pages(tage->page, 0);
        kfree(tage);
        atomic_dec(&trace_tage_allocated);
}
583
/* Recover the trace_page from its embedded list linkage. */
static struct trace_page *
tage_from_list(struct list_head *list)
{
        return list_entry(list, struct trace_page, linkage);
}
589
/*
 * Move a trace page to the tail of 'queue'; the page is unlinked from
 * whatever list it was on (list_move_tail semantics).
 */
static void
tage_to_tail(struct trace_page *tage, struct list_head *queue)
{
        __ASSERT(tage != NULL);
        __ASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}
598
/* try to return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        /* Reuse the current tail page if it still has room */
        if (tcd->tcd_cur_pages > 0) {
                __ASSERT(!list_empty(&tcd->tcd_pages));
                tage = tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        /* Otherwise grow the ring (up to tcd_max_pages), preferring a
         * pre-allocated stock page over a fresh GFP_ATOMIC allocation */
        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
                        tcd->tcd_cur_stock_pages--;
                        list_del_init(&tage->linkage);
                } else {
                        tage = tage_alloc(GFP_ATOMIC);
                        if (tage == NULL) {
                                printk(KERN_WARNING
                                       "failure to allocate a tage (%ld)\n",
                                       tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                return tage;
        }

        /* Ring is full; caller must recycle the oldest page instead */
        return NULL;
}
638
/* return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        __ASSERT(len <= PAGE_SIZE);

        tage = trace_get_tage_try(tcd, len);
        if (tage)
                return tage;

        /* Ring is full: recycle the oldest page, discarding its
         * contents, and move it to the tail of the ring. */
        if (tcd->tcd_cur_pages > 0) {
                tage = tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                tage_to_tail(tage, &tcd->tcd_pages);
        }

        /* NULL only when the ring is completely empty */
        return tage;
}
659
/*
 * Core debug message entry point (normally invoked via the SDEBUG
 * macros).  Formats the message into the per-cpu trace pages and, when
 * the severity overlaps spl_debug_printk, also echoes it to the
 * console, subject to optional rate limiting via 'arg' (an
 * spl_debug_limit_state_t, may be NULL).
 *
 * Returns 0 when the message was written to the console, 1 when
 * console output was skipped (not requested or rate limited).
 */
int
spl_debug_msg(void *arg, int subsys, int mask, const char *file,
              const char *fn, const int line, const char *format, ...)
{
        spl_debug_limit_state_t *cdls = arg;
        struct trace_cpu_data *tcd = NULL;
        struct spl_debug_header header = { 0, };
        struct trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* average message length */
        int max_nob;
        va_list ap;
        int i;

        /* Default the subsystem and severity when the caller passed 0 */
        if (subsys == 0)
                subsys = SS_DEBUG_SUBSYS;

        if (mask == 0)
                mask = SD_EMERG;

        /* Keep only the basename of the source path */
        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        trace_set_debug_header(&header, subsys, mask, line, 0);

        tcd = trace_get_tcd();
        if (tcd == NULL)
                goto console;

        if (tcd->tcd_shutting_down) {
                trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        /* Fixed prefix: file name, optional function name, and the
         * binary header when binary tracing is enabled */
        known_size = strlen(file) + 1;
        if (fn)
                known_size += strlen(fn) + 1;

        if (spl_debug_binary)
                known_size += sizeof(header);

        /* '2' used because vsnprintf returns real size required for output
         * _without_ terminating NULL. */
        for (i = 0; i < 2; i++) {
                tage = trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= SD_ERROR;

                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
                        mask |= SD_ERROR;
                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format) {
                        va_start(ap, format);
                        needed += vsnprintf(string_buf, max_nob, format, ap);
                        va_end(ap);
                }

                /* first pass sized by estimate; retry once with the
                 * true required size if it did not fit */
                if (needed < max_nob)
                        break;
        }

        /* Commit header, file/function names and body to the page */
        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (spl_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (fn) {
                strcpy(debug_buf, fn);
                tage->used += strlen(fn) + 1;
                debug_buf += strlen(fn) + 1;
        }

        __ASSERT(debug_buf == string_buf);

        tage->used += needed;
        __ASSERT (tage->used <= PAGE_SIZE);

 console:
        if ((mask & spl_debug_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (spl_console_ratelimit && cdls->cdls_next != 0 &&
                    !time_before(cdls->cdls_next, jiffies)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                trace_put_tcd(tcd);
                        return 1;
                }

                if (time_before(cdls->cdls_next + spl_console_max_delay +
                                (10 * HZ), jiffies)) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= spl_console_backoff * 4;
                } else {
                        /* recent timeout: back off exponentially within
                         * the configured min/max delay bounds */
                        cdls->cdls_delay *= spl_console_backoff;

                        if (cdls->cdls_delay < spl_console_min_delay)
                                cdls->cdls_delay = spl_console_min_delay;
                        else if (cdls->cdls_delay > spl_console_max_delay)
                                cdls->cdls_delay = spl_console_max_delay;
                }

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                /* message already formatted into the trace page */
                trace_print_to_console(&header, mask, string_buf, needed, file, fn);
                trace_put_tcd(tcd);
        } else {
                /* no trace page; format into a per-cpu console buffer */
                string_buf = trace_get_console_buffer();

                needed = 0;
                if (format != NULL) {
                        va_start(ap, format);
                        needed += vsnprintf(string_buf,
                                            TRACE_CONSOLE_BUFFER_SIZE, format, ap);
                        va_end(ap);
                }
                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
        }

        /* Report how many similar messages rate limiting suppressed */
        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = trace_get_console_buffer();

                needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");

                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(spl_debug_msg);
836
837 /* Do the collect_pages job on a single CPU: assumes that all other
838 * CPUs have been stopped during a panic. If this isn't true for
839 * some arch, this will have to be implemented separately in each arch.
840 */
841 static void
842 collect_pages_from_single_cpu(struct page_collection *pc)
843 {
844 struct trace_cpu_data *tcd;
845 int i, j;
846
847 tcd_for_each(tcd, i, j) {
848 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
849 tcd->tcd_cur_pages = 0;
850 }
851 }
852
853 static void
854 collect_pages_on_all_cpus(struct page_collection *pc)
855 {
856 struct trace_cpu_data *tcd;
857 int i, cpu;
858
859 spin_lock(&pc->pc_lock);
860 for_each_possible_cpu(cpu) {
861 tcd_for_each_type_lock(tcd, i, cpu) {
862 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
863 tcd->tcd_cur_pages = 0;
864 }
865 }
866 spin_unlock(&pc->pc_lock);
867 }
868
869 static void
870 collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
871 {
872 INIT_LIST_HEAD(&pc->pc_pages);
873
874 if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
875 collect_pages_from_single_cpu(pc);
876 else
877 collect_pages_on_all_cpus(pc);
878 }
879
/*
 * Return collected trace pages to the per-cpu rings they came from,
 * matching each page to its tcd by the cpu and type recorded at
 * allocation time.  Pages are re-inserted ahead of the first page that
 * was left in the ring, preserving the original ordering.
 */
static void
put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        spin_lock(&pc->pc_lock);

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        /* insertion point: current first page in the ring */
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {
                                /* only pages that belong to this tcd */
                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }

        spin_unlock(&pc->pc_lock);
}
908
909 static void
910 put_pages_back(struct page_collection *pc)
911 {
912 if (!spl_panic_in_progress)
913 put_pages_back_on_all_cpus(pc);
914 }
915
/*
 * Write every collected trace page to 'filename' (created exclusively
 * with mode 0600) and free each page as it is written.  On a short
 * write the remaining pages are returned to their rings.  Serialized
 * against other dumps and buffer resizing by trace_sem.  Returns 0 on
 * success or a negative errno.
 */
static int
spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct trace_page *tage;
        struct trace_page *tmp;
        mm_segment_t oldfs;
        int rc = 0;

        down_write(&trace_sem);

        /* O_EXCL: silently skip the dump if the file already exists */
        filp = spl_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
                             0600, &rc);
        if (filp == NULL) {
                if (rc != -EEXIST)
                        printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
                               filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        collect_pages(dp, &pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* allow kernel-space buffers through the file write path */
        oldfs = get_fs();
        set_fs(get_ds());

        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                rc = spl_filp_write(filp, page_address(tage->page),
                                    tage->used, spl_filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "SPL: Wanted to write %u "
                               "but wrote %d\n", tage->used, rc);
                        /* give the unwritten pages back to their rings */
                        put_pages_back(&pc);
                        __ASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                tage_free(tage);
        }

        set_fs(oldfs);

        rc = spl_filp_fsync(filp, 1);
        if (rc)
                printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
 close:
        spl_filp_close(filp);
 out:
        up_write(&trace_sem);

        return rc;
}
973
974 static void
975 spl_debug_flush_pages(void)
976 {
977 dumplog_priv_t dp;
978 struct page_collection pc;
979 struct trace_page *tage;
980 struct trace_page *tmp;
981
982 spin_lock_init(&pc.pc_lock);
983 init_waitqueue_head(&dp.dp_waitq);
984 dp.dp_pid = current->pid;
985 dp.dp_flags = 0;
986 atomic_set(&dp.dp_done, 0);
987
988 collect_pages(&dp, &pc);
989 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
990 list_del(&tage->linkage);
991 tage_free(tage);
992 }
993 }
994
995 unsigned long
996 spl_debug_set_mask(unsigned long mask) {
997 spl_debug_mask = mask;
998 return 0;
999 }
1000 EXPORT_SYMBOL(spl_debug_set_mask);
1001
1002 unsigned long
1003 spl_debug_get_mask(void) {
1004 return spl_debug_mask;
1005 }
1006 EXPORT_SYMBOL(spl_debug_get_mask);
1007
1008 unsigned long
1009 spl_debug_set_subsys(unsigned long subsys) {
1010 spl_debug_subsys = subsys;
1011 return 0;
1012 }
1013 EXPORT_SYMBOL(spl_debug_set_subsys);
1014
1015 unsigned long
1016 spl_debug_get_subsys(void) {
1017 return spl_debug_subsys;
1018 }
1019 EXPORT_SYMBOL(spl_debug_get_subsys);
1020
/*
 * Resize the total debug buffer to 'mb' megabytes, split evenly across
 * CPUs and then by the per-type percentage in tcd_pages_factor.  The
 * request is rejected with -EINVAL when smaller than one MB per CPU or
 * larger than trace_max_debug_mb().  Existing pages are not trimmed;
 * only the maximum page counts change.  Returns 0 on success.
 */
int
spl_debug_set_mb(int mb)
{
        int i, j, pages;
        int limit = trace_max_debug_mb();
        struct trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
                       "%dMB - lower limit is %d\n", mb, num_possible_cpus());
                return -EINVAL;
        }

        if (mb > limit) {
                printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
                       "%dMB - upper limit is %d\n", mb, limit);
                return -EINVAL;
        }

        /* per-cpu share, converted from MB to pages */
        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        down_write(&trace_sem);

        tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        up_write(&trace_sem);

        return 0;
}
EXPORT_SYMBOL(spl_debug_set_mb);
1053
1054 int
1055 spl_debug_get_mb(void)
1056 {
1057 int i, j;
1058 struct trace_cpu_data *tcd;
1059 int total_pages = 0;
1060
1061 down_read(&trace_sem);
1062
1063 tcd_for_each(tcd, i, j)
1064 total_pages += tcd->tcd_max_pages;
1065
1066 up_read(&trace_sem);
1067
1068 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1069 }
1070 EXPORT_SYMBOL(spl_debug_get_mb);
1071
1072 void spl_debug_dumpstack(struct task_struct *tsk)
1073 {
1074 extern void show_task(struct task_struct *);
1075
1076 if (tsk == NULL)
1077 tsk = current;
1078
1079 printk("SPL: Showing stack for process %d\n", tsk->pid);
1080 dump_stack();
1081 }
1082 EXPORT_SYMBOL(spl_debug_dumpstack);
1083
/*
 * Fatal error handler invoked on an SPL BUG/ASSERT failure.  Logs the
 * panic, dumps the stack and debug log, then either panics the system
 * (spl_debug_panic_on_bug) or parks the offending task forever.  In
 * interrupt context it must panic immediately since it cannot sleep.
 * This function never returns.
 */
void spl_debug_bug(char *file, const char *func, const int line, int flags)
{
        spl_debug_catastrophe = 1;
        spl_debug_msg(NULL, 0, SD_EMERG, file, func, line, "SPL PANIC\n");

        if (in_interrupt())
                panic("SPL PANIC in interrupt.\n");

        /* cannot spawn the dump thread if we may not sleep */
        if (in_atomic() || irqs_disabled())
                flags |= DL_NOTHREAD;

        /* Ensure all debug pages are dumped by the current cpu */
        if (spl_debug_panic_on_bug)
                spl_panic_in_progress = 1;

        spl_debug_dumpstack(NULL);
        spl_debug_dumplog(flags);

        if (spl_debug_panic_on_bug)
                panic("SPL PANIC");

        /* park this task forever; it must not continue past a BUG */
        set_task_state(current, TASK_UNINTERRUPTIBLE);
        while (1)
                schedule();
}
EXPORT_SYMBOL(spl_debug_bug);
1110
/* Discard all buffered debug messages.  Always returns 0. */
int
spl_debug_clear_buffer(void)
{
        spl_debug_flush_pages();
        return 0;
}
EXPORT_SYMBOL(spl_debug_clear_buffer);
1118
/*
 * Insert a prominent user-supplied marker into the debug log, useful
 * for correlating the log with external events.  Always returns 0.
 */
int
spl_debug_mark_buffer(char *text)
{
        SDEBUG(SD_WARNING, "*************************************\n");
        SDEBUG(SD_WARNING, "DEBUG MARKER: %s\n", text);
        SDEBUG(SD_WARNING, "*************************************\n");

        return 0;
}
EXPORT_SYMBOL(spl_debug_mark_buffer);
1129
/*
 * Allocate and initialize the tracing machinery: the per-type/per-cpu
 * trace data arrays, each tcd's lock, lists and page limits (split by
 * pages_factor), and the three per-cpu console buffers.  On any
 * allocation failure everything allocated so far is torn down via
 * trace_fini() and -ENOMEM is returned.
 */
static int
trace_init(int max_pages)
{
        struct trace_cpu_data *tcd;
        int i, j;

        init_rwsem(&trace_sem);

        /* initialize trace_data */
        memset(trace_data, 0, sizeof(trace_data));
        for (i = 0; i < TCD_TYPE_MAX; i++) {
                trace_data[i] = kmalloc(sizeof(union trace_data_union) *
                                        NR_CPUS, GFP_KERNEL);
                if (trace_data[i] == NULL)
                        goto out;
        }

        tcd_for_each(tcd, i, j) {
                spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                /* split the per-cpu budget by the type's percentage */
                tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
                tcd->tcd_shutting_down = 0;
        }

        /* one console buffer per cpu per context level (irq/softirq/proc) */
        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        trace_console_buffers[i][j] =
                                kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);

                        if (trace_console_buffers[i][j] == NULL)
                                goto out;
                }
        }

        return 0;
 out:
        trace_fini();
        printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
        return -ENOMEM;
}
1177
1178 int
1179 spl_debug_init(void)
1180 {
1181 int rc, max = spl_debug_mb;
1182
1183 spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
1184 spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
1185
1186 /* If spl_debug_mb is set to an invalid value or uninitialized
1187 * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
1188 if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
1189 max >= 512 || max < 0) {
1190 max = TCD_MAX_PAGES;
1191 } else {
1192 max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
1193 }
1194
1195 rc = trace_init(max);
1196 if (rc)
1197 return rc;
1198
1199 return rc;
1200 }
1201
/*
 * Mark every tcd as shutting down (so spl_debug_msg() stops appending)
 * and free all trace pages still held in the per-cpu rings.
 */
static void
trace_cleanup_on_all_cpus(void)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
                                                 linkage) {
                                list_del(&tage->linkage);
                                tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}
1223
/*
 * Tear down everything trace_init() allocated: per-cpu trace pages,
 * the console buffers and the trace data arrays.  Safe to call on a
 * partially initialized state (used by trace_init()'s error path),
 * since every pointer is NULL-checked or NULLed after freeing.
 */
static void
trace_fini(void)
{
        int i, j;

        trace_cleanup_on_all_cpus();

        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        if (trace_console_buffers[i][j] != NULL) {
                                kfree(trace_console_buffers[i][j]);
                                trace_console_buffers[i][j] = NULL;
                        }
                }
        }

        /* stop at the first NULL: later entries were never allocated */
        for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
                kfree(trace_data[i]);
                trace_data[i] = NULL;
        }
}
1245
/* Module-unload entry point: release all tracing resources. */
void
spl_debug_fini(void)
{
        trace_fini();
}