]> git.proxmox.com Git - mirror_spl-debian.git/blob - module/spl/spl-debug.c
0dd59db56e69ab6cbb60d08d154b390c29d4b12b
[mirror_spl-debian.git] / module / spl / spl-debug.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Debug Implementation.
25 \*****************************************************************************/
26
#include <linux/kmod.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/proc_compat.h>
#include <linux/file_compat.h>
#include <sys/sysmacros.h>
#include <spl-debug.h>
#include <spl-trace.h>
#include <spl-ctl.h>
43
44 #ifdef SS_DEBUG_SUBSYS
45 #undef SS_DEBUG_SUBSYS
46 #endif
47
48 #define SS_DEBUG_SUBSYS SS_DEBUG
49
50 /* Debug log support enabled */
51 #ifdef DEBUG_LOG
52
/* Mask of SS_* subsystem bits whose messages are recorded (default: all). */
unsigned long spl_debug_subsys = ~0;
EXPORT_SYMBOL(spl_debug_subsys);
module_param(spl_debug_subsys, ulong, 0644);
MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");

/* Mask of SD_* severity bits recorded to the in-memory trace buffers. */
unsigned long spl_debug_mask = SD_CANTMASK;
EXPORT_SYMBOL(spl_debug_mask);
module_param(spl_debug_mask, ulong, 0644);
MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");

/* Mask of SD_* severity bits that are additionally echoed via printk(). */
unsigned long spl_debug_printk = SD_CANTMASK;
EXPORT_SYMBOL(spl_debug_printk);
module_param(spl_debug_printk, ulong, 0644);
MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");

/* Requested debug buffer size in MB; -1 means "pick a default"
 * (see spl_debug_init()). */
int spl_debug_mb = -1;
EXPORT_SYMBOL(spl_debug_mb);
module_param(spl_debug_mb, int, 0644);
MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");

/* Non-zero: prepend a binary spl_debug_header to each trace record. */
unsigned int spl_debug_binary = 1;
EXPORT_SYMBOL(spl_debug_binary);

/* Set to 1 by spl_debug_bug() once a fatal condition has been hit. */
unsigned int spl_debug_catastrophe;
EXPORT_SYMBOL(spl_debug_catastrophe);

/* Non-zero: panic() after dumping logs in spl_debug_bug(). */
unsigned int spl_debug_panic_on_bug = 0;
EXPORT_SYMBOL(spl_debug_panic_on_bug);
module_param(spl_debug_panic_on_bug, uint, 0644);
MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");

/* Full dump file name ("<path>.<secs>.<pid>") built at dump time,
 * and the user-tunable path prefix it is derived from. */
static char spl_debug_file_name[PATH_MAX];
char spl_debug_file_path[PATH_MAX] = "/tmp/spl-log";

/* Non-zero: rate-limit repeated console messages (see spl_debug_msg()). */
unsigned int spl_console_ratelimit = 1;
EXPORT_SYMBOL(spl_console_ratelimit);

/* Rate-limit delay bounds in jiffies; initialized in spl_debug_init(). */
long spl_console_max_delay;
EXPORT_SYMBOL(spl_console_max_delay);

long spl_console_min_delay;
EXPORT_SYMBOL(spl_console_min_delay);

/* Multiplier applied to the rate-limit delay on each suppressed burst. */
unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
EXPORT_SYMBOL(spl_console_backoff);

unsigned int spl_debug_stack;
EXPORT_SYMBOL(spl_debug_stack);

/* Set while dumping from a panic; forces single-CPU page collection. */
static int spl_panic_in_progress;

/* Per-context (proc/softirq/irq), per-CPU trace buffers and the state
 * protecting them.  trace_sem serializes whole-buffer operations
 * (dump, resize, teardown); each tcd has its own spinlock for logging. */
union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
char *trace_console_buffers[NR_CPUS][3];
struct rw_semaphore trace_sem;
atomic_t trace_tage_allocated = ATOMIC_INIT(0);

static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
static void trace_fini(void);


/* Memory percentage breakdown by type */
static unsigned int pages_factor[TCD_TYPE_MAX] = {
       80,  /* 80% pages for TCD_TYPE_PROC */
       10,  /* 10% pages for TCD_TYPE_SOFTIRQ */
       10   /* 10% pages for TCD_TYPE_IRQ */
};
119
120 const char *
121 spl_debug_subsys2str(int subsys)
122 {
123 switch (subsys) {
124 default:
125 return NULL;
126 case SS_UNDEFINED:
127 return "undefined";
128 case SS_ATOMIC:
129 return "atomic";
130 case SS_KOBJ:
131 return "kobj";
132 case SS_VNODE:
133 return "vnode";
134 case SS_TIME:
135 return "time";
136 case SS_RWLOCK:
137 return "rwlock";
138 case SS_THREAD:
139 return "thread";
140 case SS_CONDVAR:
141 return "condvar";
142 case SS_MUTEX:
143 return "mutex";
144 case SS_RNG:
145 return "rng";
146 case SS_TASKQ:
147 return "taskq";
148 case SS_KMEM:
149 return "kmem";
150 case SS_DEBUG:
151 return "debug";
152 case SS_GENERIC:
153 return "generic";
154 case SS_PROC:
155 return "proc";
156 case SS_MODULE:
157 return "module";
158 case SS_CRED:
159 return "cred";
160 case SS_KSTAT:
161 return "kstat";
162 case SS_XDR:
163 return "xdr";
164 case SS_TSD:
165 return "tsd";
166 case SS_ZLIB:
167 return "zlib";
168 case SS_USER1:
169 return "user1";
170 case SS_USER2:
171 return "user2";
172 case SS_USER3:
173 return "user3";
174 case SS_USER4:
175 return "user4";
176 case SS_USER5:
177 return "user5";
178 case SS_USER6:
179 return "user6";
180 case SS_USER7:
181 return "user7";
182 case SS_USER8:
183 return "user8";
184 }
185 }
186
187 const char *
188 spl_debug_dbg2str(int debug)
189 {
190 switch (debug) {
191 default:
192 return NULL;
193 case SD_TRACE:
194 return "trace";
195 case SD_INFO:
196 return "info";
197 case SD_WARNING:
198 return "warning";
199 case SD_ERROR:
200 return "error";
201 case SD_EMERG:
202 return "emerg";
203 case SD_CONSOLE:
204 return "console";
205 case SD_IOCTL:
206 return "ioctl";
207 case SD_DPRINTF:
208 return "dprintf";
209 case SD_OTHER:
210 return "other";
211 }
212 }
213
214 int
215 spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
216 {
217 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
218 spl_debug_dbg2str;
219 const char *token;
220 int i, bit, len = 0;
221
222 if (mask == 0) { /* "0" */
223 if (size > 0)
224 str[0] = '0';
225 len = 1;
226 } else { /* space-separated tokens */
227 for (i = 0; i < 32; i++) {
228 bit = 1 << i;
229
230 if ((mask & bit) == 0)
231 continue;
232
233 token = fn(bit);
234 if (token == NULL) /* unused bit */
235 continue;
236
237 if (len > 0) { /* separator? */
238 if (len < size)
239 str[len] = ' ';
240 len++;
241 }
242
243 while (*token != 0) {
244 if (len < size)
245 str[len] = *token;
246 token++;
247 len++;
248 }
249 }
250 }
251
252 /* terminate 'str' */
253 if (len < size)
254 str[len] = 0;
255 else
256 str[size - 1] = 0;
257
258 return len;
259 }
260
261 static int
262 spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
263 {
264 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
265 spl_debug_dbg2str;
266 const char *token;
267 int i, j, bit;
268
269 /* match against known tokens */
270 for (i = 0; i < 32; i++) {
271 bit = 1 << i;
272
273 token = fn(bit);
274 if (token == NULL) /* unused? */
275 continue;
276
277 /* strcasecmp */
278 for (j = 0; ; j++) {
279 if (j == len) { /* end of token */
280 if (token[j] == 0) {
281 *mask = bit;
282 return 0;
283 }
284 break;
285 }
286
287 if (token[j] == 0)
288 break;
289
290 if (str[j] == token[j])
291 continue;
292
293 if (str[j] < 'A' || 'Z' < str[j])
294 break;
295
296 if (str[j] - 'A' + 'a' != token[j])
297 break;
298 }
299 }
300
301 return -EINVAL; /* no match */
302 }
303
/*
 * Parse a user-supplied mask description into *mask.  Accepts either a
 * bare number (backwards compatible) or a whitespace-separated list of
 * tokens with optional '+'/'-' operators.  An operator appearing before
 * the first token starts from the current *mask (relative update);
 * otherwise the mask is rebuilt from 0.  Returns 0 or -EINVAL.
 */
int
spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
{
	char op = 0;
	int m = 0, matched, n, t;

	/* Allow a number for backwards compatibility */

	/* find the length of 'str' with trailing whitespace stripped, so
	 * the numeric fast path below only matches when the number
	 * consumes the entire (trimmed) input */
	for (n = strlen(str); n > 0; n--)
		if (!isspace(str[n-1]))
			break;
	matched = n;

	/* %n reports how many characters the %i conversion consumed */
	if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
		*mask = m;
		return 0;
	}

	/* <str> must be a list of debug tokens or numbers separated by
	 * whitespace and optionally an operator ('+' or '-').  If an operator
	 * appears first in <str>, '*mask' is used as the starting point
	 * (relative), otherwise 0 is used (absolute).  An operator applies to
	 * all following tokens up to the next operator. */
	matched = 0;
	while (*str != 0) {
		while (isspace(*str)) /* skip whitespace */
			str++;

		if (*str == 0)
			break;

		if (*str == '+' || *str == '-') {
			op = *str++;

			/* op on first token == relative */
			if (!matched)
				m = *mask;

			while (isspace(*str)) /* skip whitespace */
				str++;

			if (*str == 0)		/* trailing op */
				return -EINVAL;
		}

		/* find token length */
		for (n = 0; str[n] != 0 && !isspace(str[n]); n++);

		/* match token */
		if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
			return -EINVAL;

		matched = 1;
		if (op == '-')
			m &= ~t;
		else
			m |= t;

		str += n;
	}

	if (!matched)
		return -EINVAL;

	*mask = m;
	return 0;
}
370
371 static void
372 spl_debug_dumplog_internal(dumplog_priv_t *dp)
373 {
374 void *journal_info;
375
376 journal_info = current->journal_info;
377 current->journal_info = NULL;
378
379 snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
380 "%s.%ld.%ld", spl_debug_file_path,
381 get_seconds(), (long)dp->dp_pid);
382 printk("SPL: Dumping log to %s\n", spl_debug_file_name);
383 spl_debug_dump_all_pages(dp, spl_debug_file_name);
384
385 current->journal_info = journal_info;
386 }
387
388 static int
389 spl_debug_dumplog_thread(void *arg)
390 {
391 dumplog_priv_t *dp = (dumplog_priv_t *)arg;
392
393 spl_debug_dumplog_internal(dp);
394 atomic_set(&dp->dp_done, 1);
395 wake_up(&dp->dp_waitq);
396 complete_and_exit(NULL, 0);
397
398 return 0; /* Unreachable */
399 }
400
401 /* When flag is set do not use a new thread for the debug dump */
402 int
403 spl_debug_dumplog(int flags)
404 {
405 struct task_struct *tsk;
406 dumplog_priv_t dp;
407
408 init_waitqueue_head(&dp.dp_waitq);
409 dp.dp_pid = current->pid;
410 dp.dp_flags = flags;
411 atomic_set(&dp.dp_done, 0);
412
413 if (dp.dp_flags & DL_NOTHREAD) {
414 spl_debug_dumplog_internal(&dp);
415 } else {
416
417 tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
418 if (tsk == NULL)
419 return -ENOMEM;
420
421 wake_up_process(tsk);
422 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
423 }
424
425 return 0;
426 }
427 EXPORT_SYMBOL(spl_debug_dumplog);
428
429 static char *
430 trace_get_console_buffer(void)
431 {
432 int cpu = get_cpu();
433 int idx;
434
435 if (in_irq()) {
436 idx = 0;
437 } else if (in_softirq()) {
438 idx = 1;
439 } else {
440 idx = 2;
441 }
442
443 return trace_console_buffers[cpu][idx];
444 }
445
/*
 * Release a buffer obtained from trace_get_console_buffer().  The buffer
 * itself needs no cleanup; this only re-enables preemption disabled by
 * the paired get_cpu().
 */
static void
trace_put_console_buffer(char *buffer)
{
        put_cpu();
}
451
/*
 * Acquire the per-CPU trace data lock with local interrupts disabled.
 * The saved IRQ flags are stored in the tcd itself so the matching
 * trace_unlock_tcd() can restore them.  Always returns 1.
 */
static int
trace_lock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);

        return 1;
}
461
/*
 * Release the per-CPU trace data lock, restoring the IRQ flags saved by
 * trace_lock_tcd().
 */
static void
trace_unlock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
}
469
470 static struct trace_cpu_data *
471 trace_get_tcd(void)
472 {
473 int cpu;
474 struct trace_cpu_data *tcd;
475
476 cpu = get_cpu();
477 if (in_irq())
478 tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
479 else if (in_softirq())
480 tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
481 else
482 tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
483
484 trace_lock_tcd(tcd);
485
486 return tcd;
487 }
488
/*
 * Release a tcd obtained from trace_get_tcd(): drop its lock and
 * re-enable preemption.
 */
static void
trace_put_tcd(struct trace_cpu_data *tcd)
{
	trace_unlock_tcd(tcd);
	put_cpu();
}
496
497 static void
498 trace_set_debug_header(struct spl_debug_header *header, int subsys,
499 int mask, const int line, unsigned long stack)
500 {
501 struct timeval tv;
502
503 do_gettimeofday(&tv);
504
505 header->ph_subsys = subsys;
506 header->ph_mask = mask;
507 header->ph_cpu_id = smp_processor_id();
508 header->ph_sec = (__u32)tv.tv_sec;
509 header->ph_usec = tv.tv_usec;
510 header->ph_stack = stack;
511 header->ph_pid = current->pid;
512 header->ph_line_num = line;
513
514 return;
515 }
516
/*
 * Emit a formatted message to the kernel console via printk(), choosing
 * a printk level and "SPL"/"SPLError" prefix from the severity mask.
 * SD_CONSOLE messages are printed bare; all others carry pid/stack and
 * file:line:function context from 'hdr'.
 *
 * NOTE(review): if none of the branches below match, 'ptype' stays NULL
 * when passed to printk.  The sole caller, spl_debug_msg(), only gets
 * here when (mask & spl_debug_printk) != 0, which makes the final
 * else-if a catch-all -- confirm before reusing this elsewhere.
 */
static void
trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
                       int len, const char *file, const char *fn)
{
        char *prefix = "SPL", *ptype = NULL;

        if ((mask & SD_EMERG) != 0) {
                prefix = "SPLError";
                ptype = KERN_EMERG;
        } else if ((mask & SD_ERROR) != 0) {
                prefix = "SPLError";
                ptype = KERN_ERR;
        } else if ((mask & SD_WARNING) != 0) {
                prefix = "SPL";
                ptype = KERN_WARNING;
        } else if ((mask & (SD_CONSOLE | spl_debug_printk)) != 0) {
                prefix = "SPL";
                ptype = KERN_INFO;
        }

        if ((mask & SD_CONSOLE) != 0) {
                printk("%s%s: %.*s", ptype, prefix, len, buf);
        } else {
                printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
                       hdr->ph_pid, hdr->ph_stack, file,
                       hdr->ph_line_num, fn, len, buf);
        }

        return;
}
547
/*
 * Upper bound for the total debug buffer size in MB: 80% of physical
 * memory.  NOTE(review): because this is MAX(), 512 acts as a floor, so
 * on small-memory systems the returned "limit" can exceed available
 * RAM -- confirm that is intentional before relying on it.
 */
static int
trace_max_debug_mb(void)
{
        return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
}
553
554 static struct trace_page *
555 tage_alloc(int gfp)
556 {
557 struct page *page;
558 struct trace_page *tage;
559
560 page = alloc_pages(gfp | __GFP_NOWARN, 0);
561 if (page == NULL)
562 return NULL;
563
564 tage = kmalloc(sizeof(*tage), gfp);
565 if (tage == NULL) {
566 __free_pages(page, 0);
567 return NULL;
568 }
569
570 tage->page = page;
571 atomic_inc(&trace_tage_allocated);
572
573 return tage;
574 }
575
576 static void
577 tage_free(struct trace_page *tage)
578 {
579 __ASSERT(tage != NULL);
580 __ASSERT(tage->page != NULL);
581
582 __free_pages(tage->page, 0);
583 kfree(tage);
584 atomic_dec(&trace_tage_allocated);
585 }
586
/* Convert a list_head embedded in a trace_page back to the trace_page. */
static struct trace_page *
tage_from_list(struct list_head *list)
{
        return list_entry(list, struct trace_page, linkage);
}
592
/* Move 'tage' (already on some list) to the tail of 'queue'. */
static void
tage_to_tail(struct trace_page *tage, struct list_head *queue)
{
        __ASSERT(tage != NULL);
        __ASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}
601
/* try to return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        /* Reuse the newest page (list tail) if it still has room. */
        if (tcd->tcd_cur_pages > 0) {
                __ASSERT(!list_empty(&tcd->tcd_pages));
                tage = tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        /* Still under the page quota: grab a pre-allocated stock page if
         * one exists, otherwise allocate atomically (this can run in
         * IRQ context, hence GFP_ATOMIC). */
        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
                        tcd->tcd_cur_stock_pages--;
                        list_del_init(&tage->linkage);
                } else {
                        tage = tage_alloc(GFP_ATOMIC);
                        if (tage == NULL) {
                                printk(KERN_WARNING
                                       "failure to allocate a tage (%ld)\n",
                                       tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                /* Stamp origin so pages can be returned to the right tcd
                 * later (see put_pages_back_on_all_cpus). */
                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                return tage;
        }

        /* At quota and the newest page is full. */
        return NULL;
}
641
642 /* return a page that has 'len' bytes left at the end */
643 static struct trace_page *
644 trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
645 {
646 struct trace_page *tage;
647
648 __ASSERT(len <= PAGE_SIZE);
649
650 tage = trace_get_tage_try(tcd, len);
651 if (tage)
652 return tage;
653
654 if (tcd->tcd_cur_pages > 0) {
655 tage = tage_from_list(tcd->tcd_pages.next);
656 tage->used = 0;
657 tage_to_tail(tage, &tcd->tcd_pages);
658 }
659
660 return tage;
661 }
662
/*
 * Core logging entry point (invoked via the SDEBUG macros).  Records the
 * message into the per-CPU trace buffers and, when its severity is in
 * spl_debug_printk, echoes it to the console subject to optional
 * rate limiting ('arg' carries per-call-site spl_debug_limit_state_t
 * state, or NULL for no limiting).
 *
 * Returns 1 when console output was suppressed (not requested, or rate
 * limited) and 0 when the message was printed.
 */
int
spl_debug_msg(void *arg, int subsys, int mask, const char *file,
    const char *fn, const int line, const char *format, ...)
{
	spl_debug_limit_state_t *cdls = arg;
	struct trace_cpu_data *tcd = NULL;
	struct spl_debug_header header = { 0, };
	struct trace_page *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char *string_buf = NULL;
	char *debug_buf;
	int known_size;
	int needed = 85; /* average message length */
	int max_nob;
	va_list ap;
	int i;

	if (subsys == 0)
		subsys = SS_DEBUG_SUBSYS;

	if (mask == 0)
		mask = SD_EMERG;

	/* reduce 'file' to its basename */
	if (strchr(file, '/'))
		file = strrchr(file, '/') + 1;

	tcd = trace_get_tcd();
	trace_set_debug_header(&header, subsys, mask, line, 0);
	if (tcd == NULL)
		goto console;

	/* buffers are being torn down; console output only */
	if (tcd->tcd_shutting_down) {
		trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	/* fixed-size part of the record: basename, optional function
	 * name, and (for binary logging) the header itself */
	known_size = strlen(file) + 1;
	if (fn)
		known_size += strlen(fn) + 1;

	if (spl_debug_binary)
		known_size += sizeof(header);

	/* '2' used because vsnprintf returns real size required for output
	 * _without_ terminating NULL. */
	for (i = 0; i < 2; i++) {
		tage = trace_get_tage(tcd, needed + known_size + 1);
		if (tage == NULL) {
			if (needed + known_size > PAGE_SIZE)
				mask |= SD_ERROR;

			trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		/* format the variable part directly after where the
		 * fixed part will be written */
		string_buf = (char *)page_address(tage->page) +
		    tage->used + known_size;

		max_nob = PAGE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
			mask |= SD_ERROR;
			trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		needed = 0;
		if (format) {
			va_start(ap, format);
			needed += vsnprintf(string_buf, max_nob, format, ap);
			va_end(ap);
		}

		/* fits: done; otherwise loop once more with the real size */
		if (needed < max_nob)
			break;
	}

	/* commit the record: header (optional), basename, function name
	 * (optional), then the already-formatted message text */
	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	if (spl_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (fn) {
		strcpy(debug_buf, fn);
		tage->used += strlen(fn) + 1;
		debug_buf += strlen(fn) + 1;
	}

	__ASSERT(debug_buf == string_buf);

	tage->used += needed;
	__ASSERT (tage->used <= PAGE_SIZE);

 console:
	if ((mask & spl_debug_printk) == 0) {
		/* no console output requested */
		if (tcd != NULL)
			trace_put_tcd(tcd);
		return 1;
	}

	if (cdls != NULL) {
		if (spl_console_ratelimit && cdls->cdls_next != 0 &&
		    !time_before(cdls->cdls_next, jiffies)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd != NULL)
				trace_put_tcd(tcd);
			return 1;
		}

		if (time_before(cdls->cdls_next + spl_console_max_delay +
		                (10 * HZ), jiffies)) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= spl_console_backoff * 4;
		} else {
			/* recent burst: back off exponentially, clamped
			 * to [min_delay, max_delay] */
			cdls->cdls_delay *= spl_console_backoff;

			if (cdls->cdls_delay < spl_console_min_delay)
				cdls->cdls_delay = spl_console_min_delay;
			else if (cdls->cdls_delay > spl_console_max_delay)
				cdls->cdls_delay = spl_console_max_delay;
		}

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
	}

	if (tcd != NULL) {
		/* message already formatted into the trace page */
		trace_print_to_console(&header, mask, string_buf, needed, file, fn);
		trace_put_tcd(tcd);
	} else {
		/* no trace page: format into a per-CPU console buffer */
		string_buf = trace_get_console_buffer();

		needed = 0;
		if (format != NULL) {
			va_start(ap, format);
			needed += vsnprintf(string_buf,
			    TRACE_CONSOLE_BUFFER_SIZE, format, ap);
			va_end(ap);
		}
		trace_print_to_console(&header, mask,
		    string_buf, needed, file, fn);

		trace_put_console_buffer(string_buf);
	}

	/* report how many messages the rate limiter swallowed */
	if (cdls != NULL && cdls->cdls_count != 0) {
		string_buf = trace_get_console_buffer();

		needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
		    "Skipped %d previous similar message%s\n",
		    cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");

		trace_print_to_console(&header, mask,
		    string_buf, needed, file, fn);

		trace_put_console_buffer(string_buf);
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(spl_debug_msg);
838
/* Do the collect_pages job on a single CPU: assumes that all other
 * CPUs have been stopped during a panic. If this isn't true for
 * some arch, this will have to be implemented separately in each arch.
 */
static void
collect_pages_from_single_cpu(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, j;

        /* Lockless splice of every tcd's pages onto the collection;
         * safe only under the assumption stated above. */
        tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;
        }
}
854
/*
 * Splice the trace pages of every context on every possible CPU onto
 * pc->pc_pages.  tcd_for_each_type_lock holds each tcd's own lock while
 * its list is emptied; pc_lock serializes writers of the collection.
 */
static void
collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, cpu;

        spin_lock(&pc->pc_lock);
        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                }
        }
        spin_unlock(&pc->pc_lock);
}
870
871 static void
872 collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
873 {
874 INIT_LIST_HEAD(&pc->pc_pages);
875
876 if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
877 collect_pages_from_single_cpu(pc);
878 else
879 collect_pages_on_all_cpus(pc);
880 }
881
/*
 * Return collected-but-unwritten pages to the per-CPU lists they came
 * from (pages were stamped with their cpu/type in trace_get_tage_try).
 * Used after a failed dump so the log data is not lost.
 */
static void
put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        spin_lock(&pc->pc_lock);

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        /* insert before the current first page so the
                         * returned pages keep their older position */
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {
                                /* only give pages back to their own tcd */
                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }

        spin_unlock(&pc->pc_lock);
}
910
911 static void
912 put_pages_back(struct page_collection *pc)
913 {
914 if (!spl_panic_in_progress)
915 put_pages_back_on_all_cpus(pc);
916 }
917
/*
 * Write every collected trace page to 'filename' and free the pages that
 * were written.  On a short write the remaining pages are handed back to
 * the per-CPU lists.  Returns 0 on success or a negative errno.
 */
static int
spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct trace_page *tage;
        struct trace_page *tmp;
        mm_segment_t oldfs;
        int rc = 0;

        down_write(&trace_sem);

        /* O_EXCL: never overwrite an existing dump file */
        filp = spl_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
                             0600, &rc);
        if (filp == NULL) {
                if (rc != -EEXIST)
                        printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
                               filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        collect_pages(dp, &pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* writing kernel buffers through the VFS requires lifting the
         * user/kernel address-space check for the duration */
        oldfs = get_fs();
        set_fs(get_ds());

        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                rc = spl_filp_write(filp, page_address(tage->page),
                                    tage->used, spl_filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "SPL: Wanted to write %u "
                               "but wrote %d\n", tage->used, rc);
                        /* short write: preserve the unwritten pages
                         * rather than losing them */
                        put_pages_back(&pc);
                        __ASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                tage_free(tage);
        }

        set_fs(oldfs);

        rc = spl_filp_fsync(filp, 1);
        if (rc)
                printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
 close:
        spl_filp_close(filp);
 out:
        up_write(&trace_sem);

        return rc;
}
975
976 static void
977 spl_debug_flush_pages(void)
978 {
979 dumplog_priv_t dp;
980 struct page_collection pc;
981 struct trace_page *tage;
982 struct trace_page *tmp;
983
984 spin_lock_init(&pc.pc_lock);
985 init_waitqueue_head(&dp.dp_waitq);
986 dp.dp_pid = current->pid;
987 dp.dp_flags = 0;
988 atomic_set(&dp.dp_done, 0);
989
990 collect_pages(&dp, &pc);
991 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
992 list_del(&tage->linkage);
993 tage_free(tage);
994 }
995 }
996
997 unsigned long
998 spl_debug_set_mask(unsigned long mask) {
999 spl_debug_mask = mask;
1000 return 0;
1001 }
1002 EXPORT_SYMBOL(spl_debug_set_mask);
1003
1004 unsigned long
1005 spl_debug_get_mask(void) {
1006 return spl_debug_mask;
1007 }
1008 EXPORT_SYMBOL(spl_debug_get_mask);
1009
1010 unsigned long
1011 spl_debug_set_subsys(unsigned long subsys) {
1012 spl_debug_subsys = subsys;
1013 return 0;
1014 }
1015 EXPORT_SYMBOL(spl_debug_set_subsys);
1016
1017 unsigned long
1018 spl_debug_get_subsys(void) {
1019 return spl_debug_subsys;
1020 }
1021 EXPORT_SYMBOL(spl_debug_get_subsys);
1022
1023 int
1024 spl_debug_set_mb(int mb)
1025 {
1026 int i, j, pages;
1027 int limit = trace_max_debug_mb();
1028 struct trace_cpu_data *tcd;
1029
1030 if (mb < num_possible_cpus()) {
1031 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1032 "%dMB - lower limit is %d\n", mb, num_possible_cpus());
1033 return -EINVAL;
1034 }
1035
1036 if (mb > limit) {
1037 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1038 "%dMB - upper limit is %d\n", mb, limit);
1039 return -EINVAL;
1040 }
1041
1042 mb /= num_possible_cpus();
1043 pages = mb << (20 - PAGE_SHIFT);
1044
1045 down_write(&trace_sem);
1046
1047 tcd_for_each(tcd, i, j)
1048 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
1049
1050 up_write(&trace_sem);
1051
1052 return 0;
1053 }
1054 EXPORT_SYMBOL(spl_debug_set_mb);
1055
1056 int
1057 spl_debug_get_mb(void)
1058 {
1059 int i, j;
1060 struct trace_cpu_data *tcd;
1061 int total_pages = 0;
1062
1063 down_read(&trace_sem);
1064
1065 tcd_for_each(tcd, i, j)
1066 total_pages += tcd->tcd_max_pages;
1067
1068 up_read(&trace_sem);
1069
1070 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1071 }
1072 EXPORT_SYMBOL(spl_debug_get_mb);
1073
1074 void spl_debug_dumpstack(struct task_struct *tsk)
1075 {
1076 extern void show_task(struct task_struct *);
1077
1078 if (tsk == NULL)
1079 tsk = current;
1080
1081 printk("SPL: Showing stack for process %d\n", tsk->pid);
1082 dump_stack();
1083 }
1084 EXPORT_SYMBOL(spl_debug_dumpstack);
1085
/*
 * Fatal error handler: log an SPL PANIC record, dump the stack and the
 * debug log, then either panic() (spl_debug_panic_on_bug) or park the
 * current task forever.  Never returns.
 */
void spl_debug_bug(char *file, const char *func, const int line, int flags)
{
        spl_debug_catastrophe = 1;
        spl_debug_msg(NULL, 0, SD_EMERG, file, func, line, "SPL PANIC\n");

        /* cannot dump the log from interrupt context */
        if (in_interrupt())
                panic("SPL PANIC in interrupt.\n");

        /* cannot spawn a dump thread while atomic or with IRQs off */
        if (in_atomic() || irqs_disabled())
                flags |= DL_NOTHREAD;

        /* Ensure all debug pages are dumped by the current cpu */
        if (spl_debug_panic_on_bug)
                spl_panic_in_progress = 1;

        spl_debug_dumpstack(NULL);
        spl_debug_dumplog(flags);

        if (spl_debug_panic_on_bug)
                panic("SPL PANIC");

        /* not panicking: take this task out of the scheduler forever */
        set_task_state(current, TASK_UNINTERRUPTIBLE);
        while (1)
                schedule();
}
EXPORT_SYMBOL(spl_debug_bug);
1112
/* Drop all buffered debug messages without writing them anywhere. */
int
spl_debug_clear_buffer(void)
{
	spl_debug_flush_pages();

	return 0;
}
EXPORT_SYMBOL(spl_debug_clear_buffer);
1120
/*
 * Log an eye-catching marker containing 'text' so a point of interest
 * can be located when the debug log is later dumped.  Always returns 0.
 */
int
spl_debug_mark_buffer(char *text)
{
        SDEBUG(SD_WARNING, "*************************************\n");
        SDEBUG(SD_WARNING, "DEBUG MARKER: %s\n", text);
        SDEBUG(SD_WARNING, "*************************************\n");

        return 0;
}
EXPORT_SYMBOL(spl_debug_mark_buffer);
1131
/*
 * Allocate and initialize all tracing state: per-type trace_data arrays,
 * every per-CPU tcd (page quotas split by pages_factor), and one console
 * buffer per CPU per context.  On any allocation failure everything
 * allocated so far is released via trace_fini().  Returns 0 or -ENOMEM.
 */
static int
trace_init(int max_pages)
{
        struct trace_cpu_data *tcd;
        int i, j;

        init_rwsem(&trace_sem);

        /* initialize trace_data */
        memset(trace_data, 0, sizeof(trace_data));
        for (i = 0; i < TCD_TYPE_MAX; i++) {
                trace_data[i] = kmalloc(sizeof(union trace_data_union) *
                                        NR_CPUS, GFP_KERNEL);
                if (trace_data[i] == NULL)
                        goto out;
        }

        /* per-CPU, per-context tcd setup; empty page lists, quota split
         * by the pages_factor percentages */
        tcd_for_each(tcd, i, j) {
                spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
                tcd->tcd_shutting_down = 0;
        }

        /* one console format buffer per CPU for each of the three
         * execution contexts (irq/softirq/process) */
        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        trace_console_buffers[i][j] =
                                kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);

                        if (trace_console_buffers[i][j] == NULL)
                                goto out;
                }
        }

        return 0;
 out:
        /* trace_fini() tolerates and frees partial initialization */
        trace_fini();
        printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
        return -ENOMEM;
}
1179
1180 int
1181 spl_debug_init(void)
1182 {
1183 int rc, max = spl_debug_mb;
1184
1185 spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
1186 spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
1187
1188 /* If spl_debug_mb is set to an invalid value or uninitialized
1189 * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
1190 if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
1191 max >= 512 || max < 0) {
1192 max = TCD_MAX_PAGES;
1193 } else {
1194 max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
1195 }
1196
1197 rc = trace_init(max);
1198 if (rc)
1199 return rc;
1200
1201 return rc;
1202 }
1203
/*
 * Mark every tcd as shutting down (so spl_debug_msg() stops appending)
 * and free all of its trace pages.  Each tcd is handled under its own
 * lock via tcd_for_each_type_lock.
 */
static void
trace_cleanup_on_all_cpus(void)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
                                                 linkage) {
                                list_del(&tage->linkage);
                                tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}
1225
1226 static void
1227 trace_fini(void)
1228 {
1229 int i, j;
1230
1231 trace_cleanup_on_all_cpus();
1232
1233 for (i = 0; i < num_possible_cpus(); i++) {
1234 for (j = 0; j < 3; j++) {
1235 if (trace_console_buffers[i][j] != NULL) {
1236 kfree(trace_console_buffers[i][j]);
1237 trace_console_buffers[i][j] = NULL;
1238 }
1239 }
1240 }
1241
1242 for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
1243 kfree(trace_data[i]);
1244 trace_data[i] = NULL;
1245 }
1246 }
1247
/* Release all debug-log resources; called once at module unload. */
void
spl_debug_fini(void)
{
        trace_fini();
}
1253
1254 #endif /* DEBUG_LOG */