1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Debug Implementation.
25 \*****************************************************************************/
27 #include <linux/kmod.h>
29 #include <linux/vmalloc.h>
30 #include <linux/pagemap.h>
31 #include <linux/slab.h>
32 #include <linux/ctype.h>
33 #include <linux/kthread.h>
34 #include <linux/hardirq.h>
35 #include <linux/interrupt.h>
36 #include <linux/spinlock.h>
37 #include <linux/proc_compat.h>
38 #include <linux/file_compat.h>
39 #include <linux/swap.h>
40 #include <linux/ratelimit.h>
41 #include <sys/sysmacros.h>
42 #include <sys/thread.h>
43 #include <spl-debug.h>
44 #include <spl-trace.h>
/* Tag every SDEBUG() in this file with the SS_DEBUG subsystem bit. */
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_DEBUG
53 /* Debug log support enabled */
56 unsigned long spl_debug_subsys
= ~0;
57 EXPORT_SYMBOL(spl_debug_subsys
);
58 module_param(spl_debug_subsys
, ulong
, 0644);
59 MODULE_PARM_DESC(spl_debug_subsys
, "Subsystem debugging level mask.");
61 unsigned long spl_debug_mask
= SD_CANTMASK
;
62 EXPORT_SYMBOL(spl_debug_mask
);
63 module_param(spl_debug_mask
, ulong
, 0644);
64 MODULE_PARM_DESC(spl_debug_mask
, "Debugging level mask.");
66 unsigned long spl_debug_printk
= SD_CANTMASK
;
67 EXPORT_SYMBOL(spl_debug_printk
);
68 module_param(spl_debug_printk
, ulong
, 0644);
69 MODULE_PARM_DESC(spl_debug_printk
, "Console printk level mask.");
71 int spl_debug_mb
= -1;
72 EXPORT_SYMBOL(spl_debug_mb
);
73 module_param(spl_debug_mb
, int, 0644);
74 MODULE_PARM_DESC(spl_debug_mb
, "Total debug buffer size.");
76 unsigned int spl_debug_binary
= 1;
77 EXPORT_SYMBOL(spl_debug_binary
);
79 unsigned int spl_debug_catastrophe
;
80 EXPORT_SYMBOL(spl_debug_catastrophe
);
82 unsigned int spl_debug_panic_on_bug
= 0;
83 EXPORT_SYMBOL(spl_debug_panic_on_bug
);
84 module_param(spl_debug_panic_on_bug
, uint
, 0644);
85 MODULE_PARM_DESC(spl_debug_panic_on_bug
, "Panic on BUG");
87 static char spl_debug_file_name
[PATH_MAX
];
88 char spl_debug_file_path
[PATH_MAX
] = "/tmp/spl-log";
90 unsigned int spl_console_ratelimit
= 1;
91 EXPORT_SYMBOL(spl_console_ratelimit
);
93 long spl_console_max_delay
;
94 EXPORT_SYMBOL(spl_console_max_delay
);
96 long spl_console_min_delay
;
97 EXPORT_SYMBOL(spl_console_min_delay
);
99 unsigned int spl_console_backoff
= SPL_DEFAULT_BACKOFF
;
100 EXPORT_SYMBOL(spl_console_backoff
);
102 unsigned int spl_debug_stack
;
103 EXPORT_SYMBOL(spl_debug_stack
);
105 static int spl_panic_in_progress
;
107 union trace_data_union (*trace_data
[TCD_TYPE_MAX
])[NR_CPUS
] __cacheline_aligned
;
108 char *trace_console_buffers
[NR_CPUS
][3];
109 struct rw_semaphore trace_sem
;
110 atomic_t trace_tage_allocated
= ATOMIC_INIT(0);
112 static int spl_debug_dump_all_pages(dumplog_priv_t
*dp
, char *);
113 static void trace_fini(void);
116 /* Memory percentage breakdown by type */
117 static unsigned int pages_factor
[TCD_TYPE_MAX
] = {
118 80, /* 80% pages for TCD_TYPE_PROC */
119 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
120 10 /* 10% pages for TCD_TYPE_IRQ */
124 spl_debug_subsys2str(int subsys
)
191 spl_debug_dbg2str(int debug
)
218 spl_debug_mask2str(char *str
, int size
, unsigned long mask
, int is_subsys
)
220 const char *(*fn
)(int bit
) = is_subsys
? spl_debug_subsys2str
:
225 if (mask
== 0) { /* "0" */
229 } else { /* space-separated tokens */
230 for (i
= 0; i
< 32; i
++) {
233 if ((mask
& bit
) == 0)
237 if (token
== NULL
) /* unused bit */
240 if (len
> 0) { /* separator? */
246 while (*token
!= 0) {
255 /* terminate 'str' */
265 spl_debug_token2mask(int *mask
, const char *str
, int len
, int is_subsys
)
267 const char *(*fn
)(int bit
) = is_subsys
? spl_debug_subsys2str
:
272 /* match against known tokens */
273 for (i
= 0; i
< 32; i
++) {
277 if (token
== NULL
) /* unused? */
282 if (j
== len
) { /* end of token */
293 if (str
[j
] == token
[j
])
296 if (str
[j
] < 'A' || 'Z' < str
[j
])
299 if (str
[j
] - 'A' + 'a' != token
[j
])
304 return -EINVAL
; /* no match */
308 spl_debug_str2mask(unsigned long *mask
, const char *str
, int is_subsys
)
311 int m
= 0, matched
, n
, t
;
313 /* Allow a number for backwards compatibility */
314 for (n
= strlen(str
); n
> 0; n
--)
315 if (!isspace(str
[n
-1]))
319 if ((t
= sscanf(str
, "%i%n", &m
, &matched
)) >= 1 && matched
== n
) {
324 /* <str> must be a list of debug tokens or numbers separated by
325 * whitespace and optionally an operator ('+' or '-'). If an operator
326 * appears first in <str>, '*mask' is used as the starting point
327 * (relative), otherwise 0 is used (absolute). An operator applies to
328 * all following tokens up to the next operator. */
331 while (isspace(*str
)) /* skip whitespace */
337 if (*str
== '+' || *str
== '-') {
340 /* op on first token == relative */
344 while (isspace(*str
)) /* skip whitespace */
347 if (*str
== 0) /* trailing op */
351 /* find token length */
352 for (n
= 0; str
[n
] != 0 && !isspace(str
[n
]); n
++);
355 if (spl_debug_token2mask(&t
, str
, n
, is_subsys
) != 0)
375 spl_debug_dumplog_internal(dumplog_priv_t
*dp
)
379 journal_info
= current
->journal_info
;
380 current
->journal_info
= NULL
;
382 snprintf(spl_debug_file_name
, sizeof(spl_debug_file_path
) - 1,
383 "%s.%ld.%ld", spl_debug_file_path
,
384 get_seconds(), (long)dp
->dp_pid
);
385 printk("SPL: Dumping log to %s\n", spl_debug_file_name
);
386 spl_debug_dump_all_pages(dp
, spl_debug_file_name
);
388 current
->journal_info
= journal_info
;
392 spl_debug_dumplog_thread(void *arg
)
394 dumplog_priv_t
*dp
= (dumplog_priv_t
*)arg
;
396 spl_debug_dumplog_internal(dp
);
397 atomic_set(&dp
->dp_done
, 1);
398 wake_up(&dp
->dp_waitq
);
399 complete_and_exit(NULL
, 0);
401 return 0; /* Unreachable */
404 /* When flag is set do not use a new thread for the debug dump */
406 spl_debug_dumplog(int flags
)
408 struct task_struct
*tsk
;
411 init_waitqueue_head(&dp
.dp_waitq
);
412 dp
.dp_pid
= current
->pid
;
414 atomic_set(&dp
.dp_done
, 0);
416 if (dp
.dp_flags
& DL_NOTHREAD
) {
417 spl_debug_dumplog_internal(&dp
);
420 tsk
= spl_kthread_create(spl_debug_dumplog_thread
,(void *)&dp
,"spl_debug");
424 wake_up_process(tsk
);
425 wait_event(dp
.dp_waitq
, atomic_read(&dp
.dp_done
));
430 EXPORT_SYMBOL(spl_debug_dumplog
);
433 trace_get_console_buffer(void)
440 } else if (in_softirq()) {
446 return trace_console_buffers
[cpu
][idx
];
450 trace_put_console_buffer(char *buffer
)
456 trace_lock_tcd(struct trace_cpu_data
*tcd
)
458 __ASSERT(tcd
->tcd_type
< TCD_TYPE_MAX
);
460 spin_lock_irqsave(&tcd
->tcd_lock
, tcd
->tcd_lock_flags
);
466 trace_unlock_tcd(struct trace_cpu_data
*tcd
)
468 __ASSERT(tcd
->tcd_type
< TCD_TYPE_MAX
);
470 spin_unlock_irqrestore(&tcd
->tcd_lock
, tcd
->tcd_lock_flags
);
473 static struct trace_cpu_data
*
477 struct trace_cpu_data
*tcd
;
481 tcd
= &(*trace_data
[TCD_TYPE_IRQ
])[cpu
].tcd
;
482 else if (in_softirq())
483 tcd
= &(*trace_data
[TCD_TYPE_SOFTIRQ
])[cpu
].tcd
;
485 tcd
= &(*trace_data
[TCD_TYPE_PROC
])[cpu
].tcd
;
493 trace_put_tcd (struct trace_cpu_data
*tcd
)
495 trace_unlock_tcd(tcd
);
501 trace_set_debug_header(struct spl_debug_header
*header
, int subsys
,
502 int mask
, const int line
, unsigned long stack
)
506 do_gettimeofday(&tv
);
508 header
->ph_subsys
= subsys
;
509 header
->ph_mask
= mask
;
510 header
->ph_cpu_id
= smp_processor_id();
511 header
->ph_sec
= (__u32
)tv
.tv_sec
;
512 header
->ph_usec
= tv
.tv_usec
;
513 header
->ph_stack
= stack
;
514 header
->ph_pid
= current
->pid
;
515 header
->ph_line_num
= line
;
521 trace_print_to_console(struct spl_debug_header
*hdr
, int mask
, const char *buf
,
522 int len
, const char *file
, const char *fn
)
524 char *prefix
= "SPL", *ptype
= NULL
;
526 if ((mask
& SD_EMERG
) != 0) {
529 } else if ((mask
& SD_ERROR
) != 0) {
532 } else if ((mask
& SD_WARNING
) != 0) {
534 ptype
= KERN_WARNING
;
535 } else if ((mask
& (SD_CONSOLE
| spl_debug_printk
)) != 0) {
540 if ((mask
& SD_CONSOLE
) != 0) {
541 printk("%s%s: %.*s", ptype
, prefix
, len
, buf
);
543 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype
, prefix
,
544 hdr
->ph_pid
, hdr
->ph_stack
, file
,
545 hdr
->ph_line_num
, fn
, len
, buf
);
552 trace_max_debug_mb(void)
554 return MAX(512, ((totalram_pages
>> (20 - PAGE_SHIFT
)) * 80) / 100);
557 static struct trace_page
*
561 struct trace_page
*tage
;
563 page
= alloc_pages(gfp
| __GFP_NOWARN
, 0);
567 tage
= kmalloc(sizeof(*tage
), gfp
);
569 __free_pages(page
, 0);
574 atomic_inc(&trace_tage_allocated
);
580 tage_free(struct trace_page
*tage
)
582 __ASSERT(tage
!= NULL
);
583 __ASSERT(tage
->page
!= NULL
);
585 __free_pages(tage
->page
, 0);
587 atomic_dec(&trace_tage_allocated
);
590 static struct trace_page
*
591 tage_from_list(struct list_head
*list
)
593 return list_entry(list
, struct trace_page
, linkage
);
597 tage_to_tail(struct trace_page
*tage
, struct list_head
*queue
)
599 __ASSERT(tage
!= NULL
);
600 __ASSERT(queue
!= NULL
);
602 list_move_tail(&tage
->linkage
, queue
);
605 /* try to return a page that has 'len' bytes left at the end */
606 static struct trace_page
*
607 trace_get_tage_try(struct trace_cpu_data
*tcd
, unsigned long len
)
609 struct trace_page
*tage
;
611 if (tcd
->tcd_cur_pages
> 0) {
612 __ASSERT(!list_empty(&tcd
->tcd_pages
));
613 tage
= tage_from_list(tcd
->tcd_pages
.prev
);
614 if (tage
->used
+ len
<= PAGE_SIZE
)
618 if (tcd
->tcd_cur_pages
< tcd
->tcd_max_pages
) {
619 if (tcd
->tcd_cur_stock_pages
> 0) {
620 tage
= tage_from_list(tcd
->tcd_stock_pages
.prev
);
621 tcd
->tcd_cur_stock_pages
--;
622 list_del_init(&tage
->linkage
);
624 tage
= tage_alloc(GFP_ATOMIC
);
627 "failure to allocate a tage (%ld)\n",
634 tage
->cpu
= smp_processor_id();
635 tage
->type
= tcd
->tcd_type
;
636 list_add_tail(&tage
->linkage
, &tcd
->tcd_pages
);
637 tcd
->tcd_cur_pages
++;
645 /* return a page that has 'len' bytes left at the end */
646 static struct trace_page
*
647 trace_get_tage(struct trace_cpu_data
*tcd
, unsigned long len
)
649 struct trace_page
*tage
;
651 __ASSERT(len
<= PAGE_SIZE
);
653 tage
= trace_get_tage_try(tcd
, len
);
657 if (tcd
->tcd_cur_pages
> 0) {
658 tage
= tage_from_list(tcd
->tcd_pages
.next
);
660 tage_to_tail(tage
, &tcd
->tcd_pages
);
667 spl_debug_msg(void *arg
, int subsys
, int mask
, const char *file
,
668 const char *fn
, const int line
, const char *format
, ...)
670 spl_debug_limit_state_t
*cdls
= arg
;
671 struct trace_cpu_data
*tcd
= NULL
;
672 struct spl_debug_header header
= { 0, };
673 struct trace_page
*tage
;
674 /* string_buf is used only if tcd != NULL, and is always set then */
675 char *string_buf
= NULL
;
678 int needed
= 85; /* average message length */
684 subsys
= SS_DEBUG_SUBSYS
;
689 if (strchr(file
, '/'))
690 file
= strrchr(file
, '/') + 1;
692 tcd
= trace_get_tcd();
693 trace_set_debug_header(&header
, subsys
, mask
, line
, 0);
697 if (tcd
->tcd_shutting_down
) {
703 known_size
= strlen(file
) + 1;
705 known_size
+= strlen(fn
) + 1;
707 if (spl_debug_binary
)
708 known_size
+= sizeof(header
);
710 /* '2' used because vsnprintf returns real size required for output
711 * _without_ terminating NULL. */
712 for (i
= 0; i
< 2; i
++) {
713 tage
= trace_get_tage(tcd
, needed
+ known_size
+ 1);
715 if (needed
+ known_size
> PAGE_SIZE
)
723 string_buf
= (char *)page_address(tage
->page
) +
724 tage
->used
+ known_size
;
726 max_nob
= PAGE_SIZE
- tage
->used
- known_size
;
728 printk(KERN_EMERG
"negative max_nob: %i\n", max_nob
);
737 va_start(ap
, format
);
738 needed
+= vsnprintf(string_buf
, max_nob
, format
, ap
);
742 if (needed
< max_nob
)
746 header
.ph_len
= known_size
+ needed
;
747 debug_buf
= (char *)page_address(tage
->page
) + tage
->used
;
749 if (spl_debug_binary
) {
750 memcpy(debug_buf
, &header
, sizeof(header
));
751 tage
->used
+= sizeof(header
);
752 debug_buf
+= sizeof(header
);
755 strcpy(debug_buf
, file
);
756 tage
->used
+= strlen(file
) + 1;
757 debug_buf
+= strlen(file
) + 1;
760 strcpy(debug_buf
, fn
);
761 tage
->used
+= strlen(fn
) + 1;
762 debug_buf
+= strlen(fn
) + 1;
765 __ASSERT(debug_buf
== string_buf
);
767 tage
->used
+= needed
;
768 __ASSERT (tage
->used
<= PAGE_SIZE
);
771 if ((mask
& spl_debug_printk
) == 0) {
772 /* no console output requested */
779 if (spl_console_ratelimit
&& cdls
->cdls_next
!= 0 &&
780 !time_before(cdls
->cdls_next
, jiffies
)) {
781 /* skipping a console message */
788 if (time_before(cdls
->cdls_next
+ spl_console_max_delay
+
789 (10 * HZ
), jiffies
)) {
790 /* last timeout was a long time ago */
791 cdls
->cdls_delay
/= spl_console_backoff
* 4;
793 cdls
->cdls_delay
*= spl_console_backoff
;
795 if (cdls
->cdls_delay
< spl_console_min_delay
)
796 cdls
->cdls_delay
= spl_console_min_delay
;
797 else if (cdls
->cdls_delay
> spl_console_max_delay
)
798 cdls
->cdls_delay
= spl_console_max_delay
;
801 /* ensure cdls_next is never zero after it's been seen */
802 cdls
->cdls_next
= (jiffies
+ cdls
->cdls_delay
) | 1;
806 trace_print_to_console(&header
, mask
, string_buf
, needed
, file
, fn
);
809 string_buf
= trace_get_console_buffer();
812 if (format
!= NULL
) {
813 va_start(ap
, format
);
814 needed
+= vsnprintf(string_buf
,
815 TRACE_CONSOLE_BUFFER_SIZE
, format
, ap
);
818 trace_print_to_console(&header
, mask
,
819 string_buf
, needed
, file
, fn
);
821 trace_put_console_buffer(string_buf
);
824 if (cdls
!= NULL
&& cdls
->cdls_count
!= 0) {
825 string_buf
= trace_get_console_buffer();
827 needed
= snprintf(string_buf
, TRACE_CONSOLE_BUFFER_SIZE
,
828 "Skipped %d previous similar message%s\n",
829 cdls
->cdls_count
, (cdls
->cdls_count
> 1) ? "s" : "");
831 trace_print_to_console(&header
, mask
,
832 string_buf
, needed
, file
, fn
);
834 trace_put_console_buffer(string_buf
);
835 cdls
->cdls_count
= 0;
840 EXPORT_SYMBOL(spl_debug_msg
);
842 /* Do the collect_pages job on a single CPU: assumes that all other
843 * CPUs have been stopped during a panic. If this isn't true for
844 * some arch, this will have to be implemented separately in each arch.
847 collect_pages_from_single_cpu(struct page_collection
*pc
)
849 struct trace_cpu_data
*tcd
;
852 tcd_for_each(tcd
, i
, j
) {
853 list_splice_init(&tcd
->tcd_pages
, &pc
->pc_pages
);
854 tcd
->tcd_cur_pages
= 0;
859 collect_pages_on_all_cpus(struct page_collection
*pc
)
861 struct trace_cpu_data
*tcd
;
864 spin_lock(&pc
->pc_lock
);
865 for_each_possible_cpu(cpu
) {
866 tcd_for_each_type_lock(tcd
, i
, cpu
) {
867 list_splice_init(&tcd
->tcd_pages
, &pc
->pc_pages
);
868 tcd
->tcd_cur_pages
= 0;
871 spin_unlock(&pc
->pc_lock
);
875 collect_pages(dumplog_priv_t
*dp
, struct page_collection
*pc
)
877 INIT_LIST_HEAD(&pc
->pc_pages
);
879 if (spl_panic_in_progress
|| dp
->dp_flags
& DL_SINGLE_CPU
)
880 collect_pages_from_single_cpu(pc
);
882 collect_pages_on_all_cpus(pc
);
886 put_pages_back_on_all_cpus(struct page_collection
*pc
)
888 struct trace_cpu_data
*tcd
;
889 struct list_head
*cur_head
;
890 struct trace_page
*tage
;
891 struct trace_page
*tmp
;
894 spin_lock(&pc
->pc_lock
);
896 for_each_possible_cpu(cpu
) {
897 tcd_for_each_type_lock(tcd
, i
, cpu
) {
898 cur_head
= tcd
->tcd_pages
.next
;
900 list_for_each_entry_safe(tage
, tmp
, &pc
->pc_pages
,
902 if (tage
->cpu
!= cpu
|| tage
->type
!= i
)
905 tage_to_tail(tage
, cur_head
);
906 tcd
->tcd_cur_pages
++;
911 spin_unlock(&pc
->pc_lock
);
915 put_pages_back(struct page_collection
*pc
)
917 if (!spl_panic_in_progress
)
918 put_pages_back_on_all_cpus(pc
);
922 spl_debug_dump_all_pages(dumplog_priv_t
*dp
, char *filename
)
924 struct page_collection pc
;
926 struct trace_page
*tage
;
927 struct trace_page
*tmp
;
931 down_write(&trace_sem
);
933 filp
= spl_filp_open(filename
, O_CREAT
|O_EXCL
|O_WRONLY
|O_LARGEFILE
,
937 printk(KERN_ERR
"SPL: Can't open %s for dump: %d\n",
942 spin_lock_init(&pc
.pc_lock
);
943 collect_pages(dp
, &pc
);
944 if (list_empty(&pc
.pc_pages
)) {
952 list_for_each_entry_safe(tage
, tmp
, &pc
.pc_pages
, linkage
) {
953 rc
= spl_filp_write(filp
, page_address(tage
->page
),
954 tage
->used
, spl_filp_poff(filp
));
955 if (rc
!= (int)tage
->used
) {
956 printk(KERN_WARNING
"SPL: Wanted to write %u "
957 "but wrote %d\n", tage
->used
, rc
);
959 __ASSERT(list_empty(&pc
.pc_pages
));
962 list_del(&tage
->linkage
);
968 rc
= spl_filp_fsync(filp
, 1);
970 printk(KERN_ERR
"SPL: Unable to sync: %d\n", rc
);
972 spl_filp_close(filp
);
974 up_write(&trace_sem
);
980 spl_debug_flush_pages(void)
983 struct page_collection pc
;
984 struct trace_page
*tage
;
985 struct trace_page
*tmp
;
987 spin_lock_init(&pc
.pc_lock
);
988 init_waitqueue_head(&dp
.dp_waitq
);
989 dp
.dp_pid
= current
->pid
;
991 atomic_set(&dp
.dp_done
, 0);
993 collect_pages(&dp
, &pc
);
994 list_for_each_entry_safe(tage
, tmp
, &pc
.pc_pages
, linkage
) {
995 list_del(&tage
->linkage
);
1001 spl_debug_set_mask(unsigned long mask
) {
1002 spl_debug_mask
= mask
;
1005 EXPORT_SYMBOL(spl_debug_set_mask
);
1008 spl_debug_get_mask(void) {
1009 return spl_debug_mask
;
1011 EXPORT_SYMBOL(spl_debug_get_mask
);
1014 spl_debug_set_subsys(unsigned long subsys
) {
1015 spl_debug_subsys
= subsys
;
1018 EXPORT_SYMBOL(spl_debug_set_subsys
);
1021 spl_debug_get_subsys(void) {
1022 return spl_debug_subsys
;
1024 EXPORT_SYMBOL(spl_debug_get_subsys
);
1027 spl_debug_set_mb(int mb
)
1030 int limit
= trace_max_debug_mb();
1031 struct trace_cpu_data
*tcd
;
1033 if (mb
< num_possible_cpus()) {
1034 printk(KERN_ERR
"SPL: Refusing to set debug buffer size to "
1035 "%dMB - lower limit is %d\n", mb
, num_possible_cpus());
1040 printk(KERN_ERR
"SPL: Refusing to set debug buffer size to "
1041 "%dMB - upper limit is %d\n", mb
, limit
);
1045 mb
/= num_possible_cpus();
1046 pages
= mb
<< (20 - PAGE_SHIFT
);
1048 down_write(&trace_sem
);
1050 tcd_for_each(tcd
, i
, j
)
1051 tcd
->tcd_max_pages
= (pages
* tcd
->tcd_pages_factor
) / 100;
1053 up_write(&trace_sem
);
1057 EXPORT_SYMBOL(spl_debug_set_mb
);
1060 spl_debug_get_mb(void)
1063 struct trace_cpu_data
*tcd
;
1064 int total_pages
= 0;
1066 down_read(&trace_sem
);
1068 tcd_for_each(tcd
, i
, j
)
1069 total_pages
+= tcd
->tcd_max_pages
;
1071 up_read(&trace_sem
);
1073 return (total_pages
>> (20 - PAGE_SHIFT
)) + 1;
1075 EXPORT_SYMBOL(spl_debug_get_mb
);
1078 * Limit the number of stack traces dumped to not more than 5 every
1079 * 60 seconds to prevent denial-of-service attacks from debug code.
1081 DEFINE_RATELIMIT_STATE(dumpstack_ratelimit_state
, 60 * HZ
, 5);
1084 spl_debug_dumpstack(struct task_struct
*tsk
)
1086 if (__ratelimit(&dumpstack_ratelimit_state
)) {
1090 printk("SPL: Showing stack for process %d\n", tsk
->pid
);
1094 EXPORT_SYMBOL(spl_debug_dumpstack
);
1096 void spl_debug_bug(char *file
, const char *func
, const int line
, int flags
)
1098 spl_debug_catastrophe
= 1;
1099 spl_debug_msg(NULL
, 0, SD_EMERG
, file
, func
, line
, "SPL PANIC\n");
1102 panic("SPL PANIC in interrupt.\n");
1104 if (in_atomic() || irqs_disabled())
1105 flags
|= DL_NOTHREAD
;
1107 /* Ensure all debug pages and dumped by current cpu */
1108 if (spl_debug_panic_on_bug
)
1109 spl_panic_in_progress
= 1;
1111 spl_debug_dumpstack(NULL
);
1113 if (spl_debug_panic_on_bug
) {
1114 spl_debug_dumplog(flags
);
1118 set_task_state(current
, TASK_UNINTERRUPTIBLE
);
1122 EXPORT_SYMBOL(spl_debug_bug
);
1125 spl_debug_clear_buffer(void)
1127 spl_debug_flush_pages();
1130 EXPORT_SYMBOL(spl_debug_clear_buffer
);
1133 spl_debug_mark_buffer(char *text
)
1135 SDEBUG(SD_WARNING
, "*************************************\n");
1136 SDEBUG(SD_WARNING
, "DEBUG MARKER: %s\n", text
);
1137 SDEBUG(SD_WARNING
, "*************************************\n");
1141 EXPORT_SYMBOL(spl_debug_mark_buffer
);
1144 trace_init(int max_pages
)
1146 struct trace_cpu_data
*tcd
;
1149 init_rwsem(&trace_sem
);
1151 /* initialize trace_data */
1152 memset(trace_data
, 0, sizeof(trace_data
));
1153 for (i
= 0; i
< TCD_TYPE_MAX
; i
++) {
1154 trace_data
[i
] = kmalloc(sizeof(union trace_data_union
) *
1155 NR_CPUS
, GFP_KERNEL
);
1156 if (trace_data
[i
] == NULL
)
1160 tcd_for_each(tcd
, i
, j
) {
1161 spin_lock_init(&tcd
->tcd_lock
);
1162 tcd
->tcd_pages_factor
= pages_factor
[i
];
1165 INIT_LIST_HEAD(&tcd
->tcd_pages
);
1166 INIT_LIST_HEAD(&tcd
->tcd_stock_pages
);
1167 tcd
->tcd_cur_pages
= 0;
1168 tcd
->tcd_cur_stock_pages
= 0;
1169 tcd
->tcd_max_pages
= (max_pages
* pages_factor
[i
]) / 100;
1170 tcd
->tcd_shutting_down
= 0;
1173 for (i
= 0; i
< num_possible_cpus(); i
++) {
1174 for (j
= 0; j
< 3; j
++) {
1175 trace_console_buffers
[i
][j
] =
1176 kmalloc(TRACE_CONSOLE_BUFFER_SIZE
,
1179 if (trace_console_buffers
[i
][j
] == NULL
)
1187 printk(KERN_ERR
"SPL: Insufficient memory for debug logs\n");
1192 spl_debug_init(void)
1194 int rc
, max
= spl_debug_mb
;
1196 spl_console_max_delay
= SPL_DEFAULT_MAX_DELAY
;
1197 spl_console_min_delay
= SPL_DEFAULT_MIN_DELAY
;
1199 /* If spl_debug_mb is set to an invalid value or uninitialized
1200 * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
1201 if (max
> (totalram_pages
>> (20 - 2 - PAGE_SHIFT
)) / 5 ||
1202 max
>= 512 || max
< 0) {
1203 max
= TCD_MAX_PAGES
;
1205 max
= (max
/ num_online_cpus()) << (20 - PAGE_SHIFT
);
1208 rc
= trace_init(max
);
1216 trace_cleanup_on_all_cpus(void)
1218 struct trace_cpu_data
*tcd
;
1219 struct trace_page
*tage
;
1220 struct trace_page
*tmp
;
1223 for_each_possible_cpu(cpu
) {
1224 tcd_for_each_type_lock(tcd
, i
, cpu
) {
1225 tcd
->tcd_shutting_down
= 1;
1227 list_for_each_entry_safe(tage
, tmp
, &tcd
->tcd_pages
,
1229 list_del(&tage
->linkage
);
1232 tcd
->tcd_cur_pages
= 0;
1242 trace_cleanup_on_all_cpus();
1244 for (i
= 0; i
< num_possible_cpus(); i
++) {
1245 for (j
= 0; j
< 3; j
++) {
1246 if (trace_console_buffers
[i
][j
] != NULL
) {
1247 kfree(trace_console_buffers
[i
][j
]);
1248 trace_console_buffers
[i
][j
] = NULL
;
1253 for (i
= 0; i
< TCD_TYPE_MAX
&& trace_data
[i
] != NULL
; i
++) {
1254 kfree(trace_data
[i
]);
1255 trace_data
[i
] = NULL
;
1260 spl_debug_fini(void)
1265 #endif /* DEBUG_LOG */