]> git.proxmox.com Git - mirror_spl-debian.git/blob - module/spl/spl-debug.c
Imported Upstream version 0.6.2
[mirror_spl-debian.git] / module / spl / spl-debug.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Debug Implementation.
25 \*****************************************************************************/
26
27 #include <linux/kmod.h>
28 #include <linux/mm.h>
29 #include <linux/vmalloc.h>
30 #include <linux/pagemap.h>
31 #include <linux/slab.h>
32 #include <linux/ctype.h>
33 #include <linux/kthread.h>
34 #include <linux/hardirq.h>
35 #include <linux/interrupt.h>
36 #include <linux/spinlock.h>
37 #include <linux/proc_compat.h>
38 #include <linux/file_compat.h>
39 #include <linux/swap.h>
40 #include <sys/sysmacros.h>
41 #include <spl-debug.h>
42 #include <spl-trace.h>
43 #include <spl-ctl.h>
44
45 #ifdef SS_DEBUG_SUBSYS
46 #undef SS_DEBUG_SUBSYS
47 #endif
48
49 #define SS_DEBUG_SUBSYS SS_DEBUG
50
51 /* Debug log support enabled */
52 #ifdef DEBUG_LOG
53
/* Bit mask of SS_* subsystems whose messages are recorded (default: all). */
unsigned long spl_debug_subsys = ~0;
EXPORT_SYMBOL(spl_debug_subsys);
module_param(spl_debug_subsys, ulong, 0644);
MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");

/* Bit mask of SD_* message types recorded in the trace buffers. */
unsigned long spl_debug_mask = SD_CANTMASK;
EXPORT_SYMBOL(spl_debug_mask);
module_param(spl_debug_mask, ulong, 0644);
MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");

/* Bit mask of SD_* message types also echoed to the console via printk(). */
unsigned long spl_debug_printk = SD_CANTMASK;
EXPORT_SYMBOL(spl_debug_printk);
module_param(spl_debug_printk, ulong, 0644);
MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");

/* Total trace buffer size in MB; -1 selects a default in spl_debug_init(). */
int spl_debug_mb = -1;
EXPORT_SYMBOL(spl_debug_mb);
module_param(spl_debug_mb, int, 0644);
MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");

/* Non-zero to prepend a binary spl_debug_header to each trace record. */
unsigned int spl_debug_binary = 1;
EXPORT_SYMBOL(spl_debug_binary);

/* Set once spl_debug_bug() has been entered. */
unsigned int spl_debug_catastrophe;
EXPORT_SYMBOL(spl_debug_catastrophe);

/* Non-zero to panic (after dumping the log) when spl_debug_bug() fires. */
unsigned int spl_debug_panic_on_bug = 0;
EXPORT_SYMBOL(spl_debug_panic_on_bug);
module_param(spl_debug_panic_on_bug, uint, 0644);
MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");

/* Name of the last dump file, built from spl_debug_file_path plus a
 * timestamp and pid suffix (see spl_debug_dumplog_internal()). */
static char spl_debug_file_name[PATH_MAX];
char spl_debug_file_path[PATH_MAX] = "/tmp/spl-log";

/* Console rate limiting state; delays are in jiffies (see spl_debug_msg()). */
unsigned int spl_console_ratelimit = 1;
EXPORT_SYMBOL(spl_console_ratelimit);

long spl_console_max_delay;
EXPORT_SYMBOL(spl_console_max_delay);

long spl_console_min_delay;
EXPORT_SYMBOL(spl_console_min_delay);

unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
EXPORT_SYMBOL(spl_console_backoff);

unsigned int spl_debug_stack;
EXPORT_SYMBOL(spl_debug_stack);

/* Non-zero while dumping during a panic; restricts work to the current CPU. */
static int spl_panic_in_progress;

/* Per-type arrays of per-CPU trace state, protected by trace_sem. */
union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
/* Per-CPU console scratch buffers: [cpu][0]=irq, [1]=softirq, [2]=process. */
char *trace_console_buffers[NR_CPUS][3];
struct rw_semaphore trace_sem;
atomic_t trace_tage_allocated = ATOMIC_INIT(0);

static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
static void trace_fini(void);


/* Memory percentage breakdown by type */
static unsigned int pages_factor[TCD_TYPE_MAX] = {
        80, /* 80% pages for TCD_TYPE_PROC */
        10, /* 10% pages for TCD_TYPE_SOFTIRQ */
        10  /* 10% pages for TCD_TYPE_IRQ */
};
120
121 const char *
122 spl_debug_subsys2str(int subsys)
123 {
124 switch (subsys) {
125 default:
126 return NULL;
127 case SS_UNDEFINED:
128 return "undefined";
129 case SS_ATOMIC:
130 return "atomic";
131 case SS_KOBJ:
132 return "kobj";
133 case SS_VNODE:
134 return "vnode";
135 case SS_TIME:
136 return "time";
137 case SS_RWLOCK:
138 return "rwlock";
139 case SS_THREAD:
140 return "thread";
141 case SS_CONDVAR:
142 return "condvar";
143 case SS_MUTEX:
144 return "mutex";
145 case SS_RNG:
146 return "rng";
147 case SS_TASKQ:
148 return "taskq";
149 case SS_KMEM:
150 return "kmem";
151 case SS_DEBUG:
152 return "debug";
153 case SS_GENERIC:
154 return "generic";
155 case SS_PROC:
156 return "proc";
157 case SS_MODULE:
158 return "module";
159 case SS_CRED:
160 return "cred";
161 case SS_KSTAT:
162 return "kstat";
163 case SS_XDR:
164 return "xdr";
165 case SS_TSD:
166 return "tsd";
167 case SS_ZLIB:
168 return "zlib";
169 case SS_USER1:
170 return "user1";
171 case SS_USER2:
172 return "user2";
173 case SS_USER3:
174 return "user3";
175 case SS_USER4:
176 return "user4";
177 case SS_USER5:
178 return "user5";
179 case SS_USER6:
180 return "user6";
181 case SS_USER7:
182 return "user7";
183 case SS_USER8:
184 return "user8";
185 }
186 }
187
188 const char *
189 spl_debug_dbg2str(int debug)
190 {
191 switch (debug) {
192 default:
193 return NULL;
194 case SD_TRACE:
195 return "trace";
196 case SD_INFO:
197 return "info";
198 case SD_WARNING:
199 return "warning";
200 case SD_ERROR:
201 return "error";
202 case SD_EMERG:
203 return "emerg";
204 case SD_CONSOLE:
205 return "console";
206 case SD_IOCTL:
207 return "ioctl";
208 case SD_DPRINTF:
209 return "dprintf";
210 case SD_OTHER:
211 return "other";
212 }
213 }
214
215 int
216 spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
217 {
218 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
219 spl_debug_dbg2str;
220 const char *token;
221 int i, bit, len = 0;
222
223 if (mask == 0) { /* "0" */
224 if (size > 0)
225 str[0] = '0';
226 len = 1;
227 } else { /* space-separated tokens */
228 for (i = 0; i < 32; i++) {
229 bit = 1 << i;
230
231 if ((mask & bit) == 0)
232 continue;
233
234 token = fn(bit);
235 if (token == NULL) /* unused bit */
236 continue;
237
238 if (len > 0) { /* separator? */
239 if (len < size)
240 str[len] = ' ';
241 len++;
242 }
243
244 while (*token != 0) {
245 if (len < size)
246 str[len] = *token;
247 token++;
248 len++;
249 }
250 }
251 }
252
253 /* terminate 'str' */
254 if (len < size)
255 str[len] = 0;
256 else
257 str[size - 1] = 0;
258
259 return len;
260 }
261
/*
 * Match the 'len' byte token at 'str' against the known token names and
 * return the matching bit through '*mask'.  The comparison is a bounded
 * case-insensitive compare: 'str' may contain upper case letters while
 * the token tables are lower case.  Returns 0 on a match, -EINVAL when
 * no token matches.
 */
static int
spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
{
        const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
                                                 spl_debug_dbg2str;
        const char *token;
        int i, j, bit;

        /* match against known tokens */
        for (i = 0; i < 32; i++) {
                bit = 1 << i;

                token = fn(bit);
                if (token == NULL)              /* unused? */
                        continue;

                /* strcasecmp */
                for (j = 0; ; j++) {
                        if (j == len) {         /* end of token */
                                /* full match only if the table name also
                                 * ends here */
                                if (token[j] == 0) {
                                        *mask = bit;
                                        return 0;
                                }
                                break;
                        }

                        if (token[j] == 0)
                                break;

                        if (str[j] == token[j])
                                continue;

                        /* only an upper case letter may still match ... */
                        if (str[j] < 'A' || 'Z' < str[j])
                                break;

                        /* ... its lower case form in the table */
                        if (str[j] - 'A' + 'a' != token[j])
                                break;
                }
        }

        return -EINVAL;                         /* no match */
}
304
/*
 * Parse 'str' into a debug or subsystem bit mask.  Accepts either a
 * bare number (backwards compatibility) or a token list as described
 * in the comment below.  Returns 0 and sets '*mask' on success,
 * -EINVAL on any parse error.
 */
int
spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
{
        char op = 0;
        int m = 0, matched, n, t;

        /* Allow a number for backwards compatibility */
        for (n = strlen(str); n > 0; n--)
                if (!isspace(str[n-1]))
                        break;
        matched = n;

        /* accepted only when the number spans the whole trimmed string */
        if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
                *mask = m;
                return 0;
        }

        /* <str> must be a list of debug tokens or numbers separated by
         * whitespace and optionally an operator ('+' or '-'). If an operator
         * appears first in <str>, '*mask' is used as the starting point
         * (relative), otherwise 0 is used (absolute). An operator applies to
         * all following tokens up to the next operator. */
        matched = 0;
        while (*str != 0) {
                while (isspace(*str))           /* skip whitespace */
                        str++;

                if (*str == 0)
                        break;

                if (*str == '+' || *str == '-') {
                        op = *str++;

                        /* op on first token == relative */
                        if (!matched)
                                m = *mask;

                        while (isspace(*str))   /* skip whitespace */
                                str++;

                        if (*str == 0)          /* trailing op */
                                return -EINVAL;
                }

                /* find token length */
                for (n = 0; str[n] != 0 && !isspace(str[n]); n++);

                /* match token */
                if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
                        return -EINVAL;

                matched = 1;
                if (op == '-')
                        m &= ~t;
                else
                        m |= t;

                str += n;
        }

        if (!matched)
                return -EINVAL;

        *mask = m;
        return 0;
}
371
372 static void
373 spl_debug_dumplog_internal(dumplog_priv_t *dp)
374 {
375 void *journal_info;
376
377 journal_info = current->journal_info;
378 current->journal_info = NULL;
379
380 snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
381 "%s.%ld.%ld", spl_debug_file_path,
382 get_seconds(), (long)dp->dp_pid);
383 printk("SPL: Dumping log to %s\n", spl_debug_file_name);
384 spl_debug_dump_all_pages(dp, spl_debug_file_name);
385
386 current->journal_info = journal_info;
387 }
388
389 static int
390 spl_debug_dumplog_thread(void *arg)
391 {
392 dumplog_priv_t *dp = (dumplog_priv_t *)arg;
393
394 spl_debug_dumplog_internal(dp);
395 atomic_set(&dp->dp_done, 1);
396 wake_up(&dp->dp_waitq);
397 complete_and_exit(NULL, 0);
398
399 return 0; /* Unreachable */
400 }
401
402 /* When flag is set do not use a new thread for the debug dump */
403 int
404 spl_debug_dumplog(int flags)
405 {
406 struct task_struct *tsk;
407 dumplog_priv_t dp;
408
409 init_waitqueue_head(&dp.dp_waitq);
410 dp.dp_pid = current->pid;
411 dp.dp_flags = flags;
412 atomic_set(&dp.dp_done, 0);
413
414 if (dp.dp_flags & DL_NOTHREAD) {
415 spl_debug_dumplog_internal(&dp);
416 } else {
417
418 tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
419 if (tsk == NULL)
420 return -ENOMEM;
421
422 wake_up_process(tsk);
423 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
424 }
425
426 return 0;
427 }
428 EXPORT_SYMBOL(spl_debug_dumplog);
429
430 static char *
431 trace_get_console_buffer(void)
432 {
433 int cpu = get_cpu();
434 int idx;
435
436 if (in_irq()) {
437 idx = 0;
438 } else if (in_softirq()) {
439 idx = 1;
440 } else {
441 idx = 2;
442 }
443
444 return trace_console_buffers[cpu][idx];
445 }
446
/* Release the CPU pinned by trace_get_console_buffer(); the buffer
 * itself is per-CPU storage and is not freed here. */
static void
trace_put_console_buffer(char *buffer)
{
        put_cpu();
}
452
/* Take the per-CPU trace data lock, disabling local interrupts (the
 * flags are stashed in the tcd itself).  Always returns 1. */
static int
trace_lock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);

        return 1;
}
462
/* Drop the per-CPU trace data lock, restoring the interrupt flags
 * saved by trace_lock_tcd(). */
static void
trace_unlock_tcd(struct trace_cpu_data *tcd)
{
        __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);

        spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
}
470
471 static struct trace_cpu_data *
472 trace_get_tcd(void)
473 {
474 int cpu;
475 struct trace_cpu_data *tcd;
476
477 cpu = get_cpu();
478 if (in_irq())
479 tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
480 else if (in_softirq())
481 tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
482 else
483 tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
484
485 trace_lock_tcd(tcd);
486
487 return tcd;
488 }
489
/* Unlock the tcd returned by trace_get_tcd() and unpin the CPU. */
static void
trace_put_tcd (struct trace_cpu_data *tcd)
{
        trace_unlock_tcd(tcd);

        put_cpu();
}
497
498 static void
499 trace_set_debug_header(struct spl_debug_header *header, int subsys,
500 int mask, const int line, unsigned long stack)
501 {
502 struct timeval tv;
503
504 do_gettimeofday(&tv);
505
506 header->ph_subsys = subsys;
507 header->ph_mask = mask;
508 header->ph_cpu_id = smp_processor_id();
509 header->ph_sec = (__u32)tv.tv_sec;
510 header->ph_usec = tv.tv_usec;
511 header->ph_stack = stack;
512 header->ph_pid = current->pid;
513 header->ph_line_num = line;
514
515 return;
516 }
517
518 static void
519 trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
520 int len, const char *file, const char *fn)
521 {
522 char *prefix = "SPL", *ptype = NULL;
523
524 if ((mask & SD_EMERG) != 0) {
525 prefix = "SPLError";
526 ptype = KERN_EMERG;
527 } else if ((mask & SD_ERROR) != 0) {
528 prefix = "SPLError";
529 ptype = KERN_ERR;
530 } else if ((mask & SD_WARNING) != 0) {
531 prefix = "SPL";
532 ptype = KERN_WARNING;
533 } else if ((mask & (SD_CONSOLE | spl_debug_printk)) != 0) {
534 prefix = "SPL";
535 ptype = KERN_INFO;
536 }
537
538 if ((mask & SD_CONSOLE) != 0) {
539 printk("%s%s: %.*s", ptype, prefix, len, buf);
540 } else {
541 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
542 hdr->ph_pid, hdr->ph_stack, file,
543 hdr->ph_line_num, fn, len, buf);
544 }
545
546 return;
547 }
548
/* Upper bound for the total debug buffer size: the larger of 512MB and
 * 80% of total RAM expressed in MB. */
static int
trace_max_debug_mb(void)
{
        return MAX(512, ((totalram_pages >> (20 - PAGE_SHIFT)) * 80) / 100);
}
554
555 static struct trace_page *
556 tage_alloc(int gfp)
557 {
558 struct page *page;
559 struct trace_page *tage;
560
561 page = alloc_pages(gfp | __GFP_NOWARN, 0);
562 if (page == NULL)
563 return NULL;
564
565 tage = kmalloc(sizeof(*tage), gfp);
566 if (tage == NULL) {
567 __free_pages(page, 0);
568 return NULL;
569 }
570
571 tage->page = page;
572 atomic_inc(&trace_tage_allocated);
573
574 return tage;
575 }
576
/* Release a trace page and its bookkeeping structure, keeping the
 * global allocation counter in sync. */
static void
tage_free(struct trace_page *tage)
{
        __ASSERT(tage != NULL);
        __ASSERT(tage->page != NULL);

        __free_pages(tage->page, 0);
        kfree(tage);
        atomic_dec(&trace_tage_allocated);
}
587
/* Convert a list_head embedded in a trace_page back to the page. */
static struct trace_page *
tage_from_list(struct list_head *list)
{
        return list_entry(list, struct trace_page, linkage);
}
593
/* Move 'tage' from whatever list it is on to the tail of 'queue'. */
static void
tage_to_tail(struct trace_page *tage, struct list_head *queue)
{
        __ASSERT(tage != NULL);
        __ASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}
602
/* try to return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        /* Reuse the newest page if it still has room for 'len' bytes */
        if (tcd->tcd_cur_pages > 0) {
                __ASSERT(!list_empty(&tcd->tcd_pages));
                tage = tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        /* Still under the page budget: take a pre-allocated stock page
         * when available, otherwise allocate one (GFP_ATOMIC because
         * this may run with interrupts disabled). */
        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
                        tcd->tcd_cur_stock_pages--;
                        list_del_init(&tage->linkage);
                } else {
                        tage = tage_alloc(GFP_ATOMIC);
                        if (tage == NULL) {
                                printk(KERN_WARNING
                                       "failure to allocate a tage (%ld)\n",
                                       tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                return tage;
        }

        /* At the budget and the newest page is full */
        return NULL;
}
642
/* return a page that has 'len' bytes left at the end */
static struct trace_page *
trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
{
        struct trace_page *tage;

        __ASSERT(len <= PAGE_SIZE);

        tage = trace_get_tage_try(tcd, len);
        if (tage)
                return tage;

        /* Budget exhausted: recycle the oldest page, discarding its
         * contents -- the trace log behaves as a ring of pages. */
        if (tcd->tcd_cur_pages > 0) {
                tage = tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                tage_to_tail(tage, &tcd->tcd_pages);
        }

        return tage;
}
663
/*
 * Core debug message routine.  Formats the message into the per-CPU
 * trace page ring (file/function/optional binary header followed by the
 * formatted text) and, when 'mask' overlaps spl_debug_printk, echoes it
 * to the console subject to the optional rate limiting state in 'arg'.
 * Returns 0 when the message reached the console, 1 when console output
 * was suppressed.
 */
int
spl_debug_msg(void *arg, int subsys, int mask, const char *file,
              const char *fn, const int line, const char *format, ...)
{
        spl_debug_limit_state_t *cdls = arg;
        struct trace_cpu_data *tcd = NULL;
        struct spl_debug_header header = { 0, };
        struct trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* average message length */
        int max_nob;
        va_list ap;
        int i;

        if (subsys == 0)
                subsys = SS_DEBUG_SUBSYS;

        if (mask == 0)
                mask = SD_EMERG;

        /* Record only the basename of the source file */
        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = trace_get_tcd();
        trace_set_debug_header(&header, subsys, mask, line, 0);
        if (tcd == NULL)
                goto console;

        if (tcd->tcd_shutting_down) {
                trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        /* Fixed-size part of the record: file name, optional function
         * name (both NUL terminated) and optionally the binary header */
        known_size = strlen(file) + 1;
        if (fn)
                known_size += strlen(fn) + 1;

        if (spl_debug_binary)
                known_size += sizeof(header);

        /* '2' used because vsnprintf returns real size required for output
         * _without_ terminating NULL. */
        for (i = 0; i < 2; i++) {
                /* First pass guesses the size; if the message did not
                 * fit, the second pass uses the exact size reported by
                 * vsnprintf() */
                tage = trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= SD_ERROR;

                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
                        mask |= SD_ERROR;
                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format) {
                        va_start(ap, format);
                        needed += vsnprintf(string_buf, max_nob, format, ap);
                        va_end(ap);
                }

                if (needed < max_nob)
                        break;
        }

        /* Commit the record: header, file, function, then the text that
         * was formatted in place above */
        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (spl_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (fn) {
                strcpy(debug_buf, fn);
                tage->used += strlen(fn) + 1;
                debug_buf += strlen(fn) + 1;
        }

        __ASSERT(debug_buf == string_buf);

        tage->used += needed;
        __ASSERT (tage->used <= PAGE_SIZE);

console:
        if ((mask & spl_debug_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (spl_console_ratelimit && cdls->cdls_next != 0 &&
                    !time_before(cdls->cdls_next, jiffies)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                trace_put_tcd(tcd);
                        return 1;
                }

                if (time_before(cdls->cdls_next + spl_console_max_delay +
                                (10 * HZ), jiffies)) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= spl_console_backoff * 4;
                } else {
                        /* still noisy: back the delay off within bounds */
                        cdls->cdls_delay *= spl_console_backoff;

                        if (cdls->cdls_delay < spl_console_min_delay)
                                cdls->cdls_delay = spl_console_min_delay;
                        else if (cdls->cdls_delay > spl_console_max_delay)
                                cdls->cdls_delay = spl_console_max_delay;
                }

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                /* text already formatted into the trace page above */
                trace_print_to_console(&header, mask, string_buf, needed, file, fn);
                trace_put_tcd(tcd);
        } else {
                /* no trace page; format into the per-CPU scratch buffer */
                string_buf = trace_get_console_buffer();

                needed = 0;
                if (format != NULL) {
                        va_start(ap, format);
                        needed += vsnprintf(string_buf,
                                            TRACE_CONSOLE_BUFFER_SIZE, format, ap);
                        va_end(ap);
                }
                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
        }

        /* Report how many rate-limited messages were dropped */
        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = trace_get_console_buffer();

                needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");

                trace_print_to_console(&header, mask,
                                       string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(spl_debug_msg);
839
/* Do the collect_pages job on a single CPU: assumes that all other
 * CPUs have been stopped during a panic. If this isn't true for
 * some arch, this will have to be implemented separately in each arch.
 */
static void
collect_pages_from_single_cpu(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, j;

        /* No pc_lock taken here: during a panic only this CPU runs */
        tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;
        }
}
855
/* Move every CPU's trace pages (all context types) onto pc->pc_pages,
 * zeroing the per-tcd page counts, under the collection lock. */
static void
collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, cpu;

        spin_lock(&pc->pc_lock);
        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                }
        }
        spin_unlock(&pc->pc_lock);
}
871
/* Gather all trace pages into 'pc'.  During a panic (or when the caller
 * requests DL_SINGLE_CPU) only the current CPU's lists are touched. */
static void
collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
                collect_pages_from_single_cpu(pc);
        else
                collect_pages_on_all_cpus(pc);
}
882
/* Return the pages remaining in 'pc' to the per-CPU lists they were
 * collected from (matched by cpu and context type), restoring the
 * per-tcd page counts. */
static void
put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        spin_lock(&pc->pc_lock);

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        /* Insert before any pages logged since collection
                         * so the ring stays in chronological order */
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {
                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }

        spin_unlock(&pc->pc_lock);
}
911
/* Return collected pages to their owners; skipped during a panic since
 * other CPUs' state must not be touched then. */
static void
put_pages_back(struct page_collection *pc)
{
        if (!spl_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}
918
/*
 * Write all collected trace pages to 'filename' (created exclusively,
 * mode 0600), freeing pages as they are written.  On a short write the
 * unwritten pages are returned to their per-CPU lists.  Serialized
 * against resize/teardown by trace_sem.  Returns 0 on success or a
 * negative errno.
 */
static int
spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct trace_page *tage;
        struct trace_page *tmp;
        mm_segment_t oldfs;
        int rc = 0;

        down_write(&trace_sem);

        filp = spl_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
                             0600, &rc);
        if (filp == NULL) {
                /* O_EXCL makes an already-existing file a silent failure */
                if (rc != -EEXIST)
                        printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
                               filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        collect_pages(dp, &pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* Allow the write path to accept kernel-space buffers */
        oldfs = get_fs();
        set_fs(get_ds());

        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                rc = spl_filp_write(filp, page_address(tage->page),
                                    tage->used, spl_filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "SPL: Wanted to write %u "
                               "but wrote %d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __ASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                tage_free(tage);
        }

        set_fs(oldfs);

        rc = spl_filp_fsync(filp, 1);
        if (rc)
                printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
close:
        spl_filp_close(filp);
out:
        up_write(&trace_sem);

        return rc;
}
976
977 static void
978 spl_debug_flush_pages(void)
979 {
980 dumplog_priv_t dp;
981 struct page_collection pc;
982 struct trace_page *tage;
983 struct trace_page *tmp;
984
985 spin_lock_init(&pc.pc_lock);
986 init_waitqueue_head(&dp.dp_waitq);
987 dp.dp_pid = current->pid;
988 dp.dp_flags = 0;
989 atomic_set(&dp.dp_done, 0);
990
991 collect_pages(&dp, &pc);
992 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
993 list_del(&tage->linkage);
994 tage_free(tage);
995 }
996 }
997
/* Replace the SD_* message type mask; always returns 0 (the previous
 * mask is not returned). */
unsigned long
spl_debug_set_mask(unsigned long mask) {
        spl_debug_mask = mask;
        return 0;
}
EXPORT_SYMBOL(spl_debug_set_mask);
1004
/* Return the current SD_* message type mask. */
unsigned long
spl_debug_get_mask(void) {
        return spl_debug_mask;
}
EXPORT_SYMBOL(spl_debug_get_mask);
1010
/* Replace the SS_* subsystem mask; always returns 0 (the previous
 * mask is not returned). */
unsigned long
spl_debug_set_subsys(unsigned long subsys) {
        spl_debug_subsys = subsys;
        return 0;
}
EXPORT_SYMBOL(spl_debug_set_subsys);
1017
/* Return the current SS_* subsystem mask. */
unsigned long
spl_debug_get_subsys(void) {
        return spl_debug_subsys;
}
EXPORT_SYMBOL(spl_debug_get_subsys);
1023
1024 int
1025 spl_debug_set_mb(int mb)
1026 {
1027 int i, j, pages;
1028 int limit = trace_max_debug_mb();
1029 struct trace_cpu_data *tcd;
1030
1031 if (mb < num_possible_cpus()) {
1032 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1033 "%dMB - lower limit is %d\n", mb, num_possible_cpus());
1034 return -EINVAL;
1035 }
1036
1037 if (mb > limit) {
1038 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1039 "%dMB - upper limit is %d\n", mb, limit);
1040 return -EINVAL;
1041 }
1042
1043 mb /= num_possible_cpus();
1044 pages = mb << (20 - PAGE_SHIFT);
1045
1046 down_write(&trace_sem);
1047
1048 tcd_for_each(tcd, i, j)
1049 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
1050
1051 up_write(&trace_sem);
1052
1053 return 0;
1054 }
1055 EXPORT_SYMBOL(spl_debug_set_mb);
1056
1057 int
1058 spl_debug_get_mb(void)
1059 {
1060 int i, j;
1061 struct trace_cpu_data *tcd;
1062 int total_pages = 0;
1063
1064 down_read(&trace_sem);
1065
1066 tcd_for_each(tcd, i, j)
1067 total_pages += tcd->tcd_max_pages;
1068
1069 up_read(&trace_sem);
1070
1071 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1072 }
1073 EXPORT_SYMBOL(spl_debug_get_mb);
1074
1075 void spl_debug_dumpstack(struct task_struct *tsk)
1076 {
1077 extern void show_task(struct task_struct *);
1078
1079 if (tsk == NULL)
1080 tsk = current;
1081
1082 printk("SPL: Showing stack for process %d\n", tsk->pid);
1083 dump_stack();
1084 }
1085 EXPORT_SYMBOL(spl_debug_dumpstack);
1086
/*
 * Report an unrecoverable SPL error.  Logs an emergency message and a
 * stack trace; panics immediately when called from interrupt context.
 * When spl_debug_panic_on_bug is set the trace log is dumped and the
 * system panics; otherwise the offending task is parked forever in
 * TASK_UNINTERRUPTIBLE.
 */
void spl_debug_bug(char *file, const char *func, const int line, int flags)
{
        spl_debug_catastrophe = 1;
        spl_debug_msg(NULL, 0, SD_EMERG, file, func, line, "SPL PANIC\n");

        if (in_interrupt())
                panic("SPL PANIC in interrupt.\n");

        /* Cannot spawn the dump thread when sleeping is not allowed */
        if (in_atomic() || irqs_disabled())
                flags |= DL_NOTHREAD;

        /* Ensure all debug pages are dumped by the current cpu */
        if (spl_debug_panic_on_bug)
                spl_panic_in_progress = 1;

        spl_debug_dumpstack(NULL);

        if (spl_debug_panic_on_bug) {
                spl_debug_dumplog(flags);
                panic("SPL PANIC");
        }

        /* Park this task forever; the system is considered unusable */
        set_task_state(current, TASK_UNINTERRUPTIBLE);
        while (1)
                schedule();
}
EXPORT_SYMBOL(spl_debug_bug);
1114
/* Discard all buffered trace pages without writing them anywhere;
 * always returns 0. */
int
spl_debug_clear_buffer(void)
{
        spl_debug_flush_pages();
        return 0;
}
EXPORT_SYMBOL(spl_debug_clear_buffer);
1122
/* Insert a highly visible marker message into the trace log; always
 * returns 0. */
int
spl_debug_mark_buffer(char *text)
{
        SDEBUG(SD_WARNING, "*************************************\n");
        SDEBUG(SD_WARNING, "DEBUG MARKER: %s\n", text);
        SDEBUG(SD_WARNING, "*************************************\n");

        return 0;
}
EXPORT_SYMBOL(spl_debug_mark_buffer);
1133
/*
 * Allocate the per-CPU trace state and console scratch buffers.
 * 'max_pages' is the per-CPU page budget, split between context types
 * according to pages_factor[].  Returns 0 or -ENOMEM (after releasing
 * anything partially allocated via trace_fini()).
 */
static int
trace_init(int max_pages)
{
        struct trace_cpu_data *tcd;
        int i, j;

        init_rwsem(&trace_sem);

        /* initialize trace_data */
        memset(trace_data, 0, sizeof(trace_data));
        for (i = 0; i < TCD_TYPE_MAX; i++) {
                trace_data[i] = kmalloc(sizeof(union trace_data_union) *
                                        NR_CPUS, GFP_KERNEL);
                if (trace_data[i] == NULL)
                        goto out;
        }

        tcd_for_each(tcd, i, j) {
                spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
                tcd->tcd_shutting_down = 0;
        }

        /* One scratch buffer per CPU for each of irq/softirq/process */
        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        trace_console_buffers[i][j] =
                                kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);

                        if (trace_console_buffers[i][j] == NULL)
                                goto out;
                }
        }

        return 0;
out:
        trace_fini();
        printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
        return -ENOMEM;
}
1181
1182 int
1183 spl_debug_init(void)
1184 {
1185 int rc, max = spl_debug_mb;
1186
1187 spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
1188 spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
1189
1190 /* If spl_debug_mb is set to an invalid value or uninitialized
1191 * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
1192 if (max > (totalram_pages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
1193 max >= 512 || max < 0) {
1194 max = TCD_MAX_PAGES;
1195 } else {
1196 max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
1197 }
1198
1199 rc = trace_init(max);
1200 if (rc)
1201 return rc;
1202
1203 return rc;
1204 }
1205
/* Flag every tcd as shutting down (spl_debug_msg() checks this) and
 * release all of its trace pages. */
static void
trace_cleanup_on_all_cpus(void)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
                                                 linkage) {
                                list_del(&tage->linkage);
                                tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}
1227
/* Tear down all trace state: per-CPU pages, console scratch buffers
 * and the per-type trace_data arrays.  Safe to call on partially
 * initialized state (used by the trace_init() error path). */
static void
trace_fini(void)
{
        int i, j;

        trace_cleanup_on_all_cpus();

        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < 3; j++) {
                        if (trace_console_buffers[i][j] != NULL) {
                                kfree(trace_console_buffers[i][j]);
                                trace_console_buffers[i][j] = NULL;
                        }
                }
        }

        /* trace_data[] is filled front-to-back, so stop at the first NULL */
        for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
                kfree(trace_data[i]);
                trace_data[i] = NULL;
        }
}
1249
/* Release all debug log resources at module unload. */
void
spl_debug_fini(void)
{
        trace_fini();
}
1255
1256 #endif /* DEBUG_LOG */