/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * This file was originally part of Lustre, http://www.lustre.org,
 * but has subsequently been adapted for use in the SPL in
 * accordance with the GPL.
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 *     Zach Brown <zab@clusterfs.com>
 *     Phil Schwan <phil@clusterfs.com>
 *     Brian Behlendorf <behlendorf1@llnl.gov>,
 *     Herb Wartens <wartens2@llnl.gov>,
 *     Jim Garlick <garlick@llnl.gov>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
34 #include <linux/kmod.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pagemap.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/kthread.h>
41 #include <linux/hardirq.h>
42 #include <linux/interrupt.h>
43 #include <sys/sysmacros.h>
45 #include <sys/debug.h>
/*
 * Force this file's debug subsystem to S_DEBUG; drop any value a prior
 * include may have established.  The #ifdef/#undef pair must be closed
 * with #endif before redefining, otherwise the conditional is unbalanced.
 */
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_DEBUG
54 unsigned long spl_debug_subsys
= ~0;
55 EXPORT_SYMBOL(spl_debug_subsys
);
56 module_param(spl_debug_subsys
, long, 0644);
57 MODULE_PARM_DESC(spl_debug_subsys
, "Subsystem debugging level mask.");
59 unsigned long spl_debug_mask
= (D_EMERG
| D_ERROR
| D_WARNING
| D_CONSOLE
);
60 EXPORT_SYMBOL(spl_debug_mask
);
61 module_param(spl_debug_mask
, long, 0644);
62 MODULE_PARM_DESC(spl_debug_mask
, "Debugging level mask.");
64 unsigned long spl_debug_printk
= D_CANTMASK
;
65 EXPORT_SYMBOL(spl_debug_printk
);
66 module_param(spl_debug_printk
, long, 0644);
67 MODULE_PARM_DESC(spl_debug_printk
, "Console printk level mask.");
/* Total debug buffer size in MB; -1 selects an automatic default at init
 * (see the sizing logic around trace_init()). */
int spl_debug_mb = -1;
EXPORT_SYMBOL(spl_debug_mb);
module_param(spl_debug_mb, int, 0644);
MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
/* Non-zero prepends a binary spl_debug_header to each trace record
 * (consulted in spl_debug_vmsg). */
unsigned int spl_debug_binary = 1;
EXPORT_SYMBOL(spl_debug_binary);

/* Set to 1 once spl_debug_bug() has fired; zero-initialized by default. */
unsigned int spl_debug_catastrophe;
EXPORT_SYMBOL(spl_debug_catastrophe);
80 unsigned int spl_debug_panic_on_bug
= 1;
81 EXPORT_SYMBOL(spl_debug_panic_on_bug
);
82 module_param(spl_debug_panic_on_bug
, int, 0644);
83 MODULE_PARM_DESC(spl_debug_panic_on_bug
, "Panic on BUG");
/* Fully expanded dump file name ("<path>.<secs>.<pid>"), filled in by
 * spl_debug_dumplog_internal() at dump time. */
static char spl_debug_file_name[PATH_MAX];
/* Base path prefix for debug log dumps. */
char spl_debug_file_path[PATH_MAX] = "/var/dumps/spl-log";
/* Non-zero enables rate limiting of repeated console messages
 * (checked against cdls_next in spl_debug_vmsg). */
unsigned int spl_console_ratelimit = 1;
EXPORT_SYMBOL(spl_console_ratelimit);

/* Upper bound on the per-site console suppression delay; compared against
 * jiffies, initialized at module setup. */
long spl_console_max_delay;
EXPORT_SYMBOL(spl_console_max_delay);

/* Lower bound on the per-site console suppression delay; compared against
 * jiffies, initialized at module setup. */
long spl_console_min_delay;
EXPORT_SYMBOL(spl_console_min_delay);

/* Multiplier applied to cdls_delay each time a console message repeats. */
unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
EXPORT_SYMBOL(spl_console_backoff);

/* Stack usage value recorded into trace headers via CDEBUG_STACK(). */
unsigned int spl_debug_stack;
EXPORT_SYMBOL(spl_debug_stack);
/* Non-zero while a panic dump is in progress; makes page collection run on
 * a single CPU and skips cross-CPU page redistribution. */
static int spl_panic_in_progress;

/* Per-type (TCD_TYPE_*), per-CPU trace buffer state; allocated in
 * trace_init(), freed in trace_fini(). */
union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
/* Three console formatting buffers per CPU — one per context type,
 * indexed by [cpu][idx] in trace_get_console_buffer(). */
char *trace_console_buffers[NR_CPUS][3];
/* Serializes trace buffer resizing and log dumping. */
struct rw_semaphore trace_sem;
/* Count of currently allocated trace pages (tage_alloc/tage_free). */
atomic_t trace_tage_allocated = ATOMIC_INIT(0);
/* Forward declarations for the dump and teardown helpers defined below. */
static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
static void trace_fini(void);
114 /* Memory percentage breakdown by type */
115 static unsigned int pages_factor
[TCD_TYPE_MAX
] = {
116 80, /* 80% pages for TCD_TYPE_PROC */
117 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
118 10 /* 10% pages for TCD_TYPE_IRQ */
122 spl_debug_subsys2str(int subsys
)
163 spl_debug_dbg2str(int debug
)
190 spl_debug_mask2str(char *str
, int size
, unsigned long mask
, int is_subsys
)
192 const char *(*fn
)(int bit
) = is_subsys
? spl_debug_subsys2str
:
197 if (mask
== 0) { /* "0" */
201 } else { /* space-separated tokens */
202 for (i
= 0; i
< 32; i
++) {
205 if ((mask
& bit
) == 0)
209 if (token
== NULL
) /* unused bit */
212 if (len
> 0) { /* separator? */
218 while (*token
!= 0) {
227 /* terminate 'str' */
237 spl_debug_token2mask(int *mask
, const char *str
, int len
, int is_subsys
)
239 const char *(*fn
)(int bit
) = is_subsys
? spl_debug_subsys2str
:
244 /* match against known tokens */
245 for (i
= 0; i
< 32; i
++) {
249 if (token
== NULL
) /* unused? */
254 if (j
== len
) { /* end of token */
265 if (str
[j
] == token
[j
])
268 if (str
[j
] < 'A' || 'Z' < str
[j
])
271 if (str
[j
] - 'A' + 'a' != token
[j
])
276 return -EINVAL
; /* no match */
280 spl_debug_str2mask(unsigned long *mask
, const char *str
, int is_subsys
)
283 int m
= 0, matched
, n
, t
;
285 /* Allow a number for backwards compatibility */
286 for (n
= strlen(str
); n
> 0; n
--)
287 if (!isspace(str
[n
-1]))
291 if ((t
= sscanf(str
, "%i%n", &m
, &matched
)) >= 1 && matched
== n
) {
296 /* <str> must be a list of debug tokens or numbers separated by
297 * whitespace and optionally an operator ('+' or '-'). If an operator
298 * appears first in <str>, '*mask' is used as the starting point
299 * (relative), otherwise 0 is used (absolute). An operator applies to
300 * all following tokens up to the next operator. */
303 while (isspace(*str
)) /* skip whitespace */
309 if (*str
== '+' || *str
== '-') {
312 /* op on first token == relative */
316 while (isspace(*str
)) /* skip whitespace */
319 if (*str
== 0) /* trailing op */
323 /* find token length */
324 for (n
= 0; str
[n
] != 0 && !isspace(str
[n
]); n
++);
327 if (spl_debug_token2mask(&t
, str
, n
, is_subsys
) != 0)
347 spl_debug_dumplog_internal(dumplog_priv_t
*dp
)
351 journal_info
= current
->journal_info
;
352 current
->journal_info
= NULL
;
354 snprintf(spl_debug_file_name
, sizeof(spl_debug_file_path
) - 1,
355 "%s.%ld.%ld", spl_debug_file_path
,
356 get_seconds(), (long)dp
->dp_pid
);
357 printk(KERN_ALERT
"SPL: dumping log to %s\n", spl_debug_file_name
);
358 spl_debug_dump_all_pages(dp
, spl_debug_file_name
);
360 current
->journal_info
= journal_info
;
364 spl_debug_dumplog_thread(void *arg
)
366 dumplog_priv_t
*dp
= (dumplog_priv_t
*)arg
;
368 spl_debug_dumplog_internal(dp
);
369 atomic_set(&dp
->dp_done
, 1);
370 wake_up(&dp
->dp_waitq
);
371 complete_and_exit(NULL
, 0);
373 return 0; /* Unreachable */
376 /* When flag is set do not use a new thread for the debug dump */
378 spl_debug_dumplog(int flags
)
380 struct task_struct
*tsk
;
383 init_waitqueue_head(&dp
.dp_waitq
);
384 dp
.dp_pid
= current
->pid
;
386 atomic_set(&dp
.dp_done
, 0);
388 if (dp
.dp_flags
& DL_NOTHREAD
) {
389 spl_debug_dumplog_internal(&dp
);
392 tsk
= kthread_create(spl_debug_dumplog_thread
,(void *)&dp
,"spl_debug");
396 wake_up_process(tsk
);
397 wait_event(dp
.dp_waitq
, atomic_read(&dp
.dp_done
));
402 EXPORT_SYMBOL(spl_debug_dumplog
);
405 trace_get_console_buffer(void)
412 } else if (in_softirq()) {
418 return trace_console_buffers
[cpu
][idx
];
422 trace_put_console_buffer(char *buffer
)
427 static struct trace_cpu_data
*
434 return &(*trace_data
[TCD_TYPE_IRQ
])[cpu
].tcd
;
435 else if (in_softirq())
436 return &(*trace_data
[TCD_TYPE_SOFTIRQ
])[cpu
].tcd
;
438 return &(*trace_data
[TCD_TYPE_PROC
])[cpu
].tcd
;
442 trace_put_tcd (struct trace_cpu_data
*tcd
)
448 trace_lock_tcd(struct trace_cpu_data
*tcd
)
450 __ASSERT(tcd
->tcd_type
< TCD_TYPE_MAX
);
452 if (tcd
->tcd_type
== TCD_TYPE_IRQ
)
454 else if (tcd
->tcd_type
== TCD_TYPE_SOFTIRQ
)
461 trace_unlock_tcd(struct trace_cpu_data
*tcd
)
463 __ASSERT(tcd
->tcd_type
< TCD_TYPE_MAX
);
465 if (tcd
->tcd_type
== TCD_TYPE_IRQ
)
467 else if (tcd
->tcd_type
== TCD_TYPE_SOFTIRQ
)
472 trace_set_debug_header(struct spl_debug_header
*header
, int subsys
,
473 int mask
, const int line
, unsigned long stack
)
477 do_gettimeofday(&tv
);
479 header
->ph_subsys
= subsys
;
480 header
->ph_mask
= mask
;
481 header
->ph_cpu_id
= smp_processor_id();
482 header
->ph_sec
= (__u32
)tv
.tv_sec
;
483 header
->ph_usec
= tv
.tv_usec
;
484 header
->ph_stack
= stack
;
485 header
->ph_pid
= current
->pid
;
486 header
->ph_line_num
= line
;
492 trace_print_to_console(struct spl_debug_header
*hdr
, int mask
, const char *buf
,
493 int len
, const char *file
, const char *fn
)
495 char *prefix
= "SPL", *ptype
= NULL
;
497 if ((mask
& D_EMERG
) != 0) {
500 } else if ((mask
& D_ERROR
) != 0) {
503 } else if ((mask
& D_WARNING
) != 0) {
505 ptype
= KERN_WARNING
;
506 } else if ((mask
& (D_CONSOLE
| spl_debug_printk
)) != 0) {
511 if ((mask
& D_CONSOLE
) != 0) {
512 printk("%s%s: %.*s", ptype
, prefix
, len
, buf
);
514 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype
, prefix
,
515 hdr
->ph_pid
, hdr
->ph_stack
, file
,
516 hdr
->ph_line_num
, fn
, len
, buf
);
523 trace_max_debug_mb(void)
525 return MAX(512, ((num_physpages
>> (20 - PAGE_SHIFT
)) * 80) / 100);
529 trace_call_on_all_cpus(void (*fn
)(void *arg
), void *arg
)
531 cpumask_t mask
, cpus_allowed
= current
->cpus_allowed
;
534 for_each_online_cpu(cpu
) {
537 set_cpus_allowed(current
, mask
);
541 set_cpus_allowed(current
, cpus_allowed
);
545 static struct trace_page
*
549 struct trace_page
*tage
;
551 page
= alloc_pages(gfp
| __GFP_NOWARN
, 0);
555 tage
= kmalloc(sizeof(*tage
), gfp
);
557 __free_pages(page
, 0);
562 atomic_inc(&trace_tage_allocated
);
568 tage_free(struct trace_page
*tage
)
570 __ASSERT(tage
!= NULL
);
571 __ASSERT(tage
->page
!= NULL
);
573 __free_pages(tage
->page
, 0);
575 atomic_dec(&trace_tage_allocated
);
578 static struct trace_page
*
579 tage_from_list(struct list_head
*list
)
581 return list_entry(list
, struct trace_page
, linkage
);
585 tage_to_tail(struct trace_page
*tage
, struct list_head
*queue
)
587 __ASSERT(tage
!= NULL
);
588 __ASSERT(queue
!= NULL
);
590 list_move_tail(&tage
->linkage
, queue
);
593 /* try to return a page that has 'len' bytes left at the end */
594 static struct trace_page
*
595 trace_get_tage_try(struct trace_cpu_data
*tcd
, unsigned long len
)
597 struct trace_page
*tage
;
599 if (tcd
->tcd_cur_pages
> 0) {
600 __ASSERT(!list_empty(&tcd
->tcd_pages
));
601 tage
= tage_from_list(tcd
->tcd_pages
.prev
);
602 if (tage
->used
+ len
<= PAGE_SIZE
)
606 if (tcd
->tcd_cur_pages
< tcd
->tcd_max_pages
) {
607 if (tcd
->tcd_cur_stock_pages
> 0) {
608 tage
= tage_from_list(tcd
->tcd_stock_pages
.prev
);
609 tcd
->tcd_cur_stock_pages
--;
610 list_del_init(&tage
->linkage
);
612 tage
= tage_alloc(GFP_ATOMIC
);
615 "failure to allocate a tage (%ld)\n",
622 tage
->cpu
= smp_processor_id();
623 tage
->type
= tcd
->tcd_type
;
624 list_add_tail(&tage
->linkage
, &tcd
->tcd_pages
);
625 tcd
->tcd_cur_pages
++;
633 /* return a page that has 'len' bytes left at the end */
634 static struct trace_page
*
635 trace_get_tage(struct trace_cpu_data
*tcd
, unsigned long len
)
637 struct trace_page
*tage
;
639 __ASSERT(len
<= PAGE_SIZE
);
641 tage
= trace_get_tage_try(tcd
, len
);
645 if (tcd
->tcd_cur_pages
> 0) {
646 tage
= tage_from_list(tcd
->tcd_pages
.next
);
648 tage_to_tail(tage
, &tcd
->tcd_pages
);
655 spl_debug_vmsg(spl_debug_limit_state_t
*cdls
, int subsys
, int mask
,
656 const char *file
, const char *fn
, const int line
,
657 const char *format1
, va_list args
, const char *format2
, ...)
659 struct trace_cpu_data
*tcd
= NULL
;
660 struct spl_debug_header header
;
661 struct trace_page
*tage
;
662 /* string_buf is used only if tcd != NULL, and is always set then */
663 char *string_buf
= NULL
;
666 int needed
= 85; /* average message length */
672 if (strchr(file
, '/'))
673 file
= strrchr(file
, '/') + 1;
675 trace_set_debug_header(&header
, subsys
, mask
, line
, CDEBUG_STACK());
677 tcd
= trace_get_tcd();
681 if (tcd
->tcd_shutting_down
) {
687 known_size
= strlen(file
) + 1;
689 known_size
+= strlen(fn
) + 1;
691 if (spl_debug_binary
)
692 known_size
+= sizeof(header
);
694 /* '2' used because vsnprintf returns real size required for output
695 * _without_ terminating NULL. */
696 for (i
= 0; i
< 2; i
++) {
697 tage
= trace_get_tage(tcd
, needed
+ known_size
+ 1);
699 if (needed
+ known_size
> PAGE_SIZE
)
707 string_buf
= (char *)page_address(tage
->page
) +
708 tage
->used
+ known_size
;
710 max_nob
= PAGE_SIZE
- tage
->used
- known_size
;
712 printk(KERN_EMERG
"negative max_nob: %i\n", max_nob
);
722 needed
= vsnprintf(string_buf
, max_nob
, format1
, ap
);
727 remain
= max_nob
- needed
;
731 va_start(ap
, format2
);
732 needed
+= vsnprintf(string_buf
+needed
, remain
, format2
, ap
);
736 if (needed
< max_nob
)
740 if (unlikely(*(string_buf
+ needed
- 1) != '\n'))
741 printk(KERN_INFO
"format at %s:%d:%s doesn't end in newline\n",
744 header
.ph_len
= known_size
+ needed
;
745 debug_buf
= (char *)page_address(tage
->page
) + tage
->used
;
747 if (spl_debug_binary
) {
748 memcpy(debug_buf
, &header
, sizeof(header
));
749 tage
->used
+= sizeof(header
);
750 debug_buf
+= sizeof(header
);
753 strcpy(debug_buf
, file
);
754 tage
->used
+= strlen(file
) + 1;
755 debug_buf
+= strlen(file
) + 1;
758 strcpy(debug_buf
, fn
);
759 tage
->used
+= strlen(fn
) + 1;
760 debug_buf
+= strlen(fn
) + 1;
763 __ASSERT(debug_buf
== string_buf
);
765 tage
->used
+= needed
;
766 __ASSERT (tage
->used
<= PAGE_SIZE
);
769 if ((mask
& spl_debug_printk
) == 0) {
770 /* no console output requested */
777 if (spl_console_ratelimit
&& cdls
->cdls_next
!= 0 &&
778 !time_before(cdls
->cdls_next
, jiffies
)) {
779 /* skipping a console message */
786 if (time_before(cdls
->cdls_next
+ spl_console_max_delay
+
787 (10 * HZ
), jiffies
)) {
788 /* last timeout was a long time ago */
789 cdls
->cdls_delay
/= spl_console_backoff
* 4;
791 cdls
->cdls_delay
*= spl_console_backoff
;
793 if (cdls
->cdls_delay
< spl_console_min_delay
)
794 cdls
->cdls_delay
= spl_console_min_delay
;
795 else if (cdls
->cdls_delay
> spl_console_max_delay
)
796 cdls
->cdls_delay
= spl_console_max_delay
;
799 /* ensure cdls_next is never zero after it's been seen */
800 cdls
->cdls_next
= (jiffies
+ cdls
->cdls_delay
) | 1;
804 trace_print_to_console(&header
, mask
, string_buf
, needed
, file
, fn
);
807 string_buf
= trace_get_console_buffer();
810 if (format1
!= NULL
) {
812 needed
= vsnprintf(string_buf
, TRACE_CONSOLE_BUFFER_SIZE
, format1
, ap
);
815 if (format2
!= NULL
) {
816 remain
= TRACE_CONSOLE_BUFFER_SIZE
- needed
;
818 va_start(ap
, format2
);
819 needed
+= vsnprintf(string_buf
+needed
, remain
, format2
, ap
);
823 trace_print_to_console(&header
, mask
,
824 string_buf
, needed
, file
, fn
);
826 trace_put_console_buffer(string_buf
);
829 if (cdls
!= NULL
&& cdls
->cdls_count
!= 0) {
830 string_buf
= trace_get_console_buffer();
832 needed
= snprintf(string_buf
, TRACE_CONSOLE_BUFFER_SIZE
,
833 "Skipped %d previous similar message%s\n",
834 cdls
->cdls_count
, (cdls
->cdls_count
> 1) ? "s" : "");
836 trace_print_to_console(&header
, mask
,
837 string_buf
, needed
, file
, fn
);
839 trace_put_console_buffer(string_buf
);
840 cdls
->cdls_count
= 0;
845 EXPORT_SYMBOL(spl_debug_vmsg
);
847 /* Do the collect_pages job on a single CPU: assumes that all other
848 * CPUs have been stopped during a panic. If this isn't true for
849 * some arch, this will have to be implemented separately in each arch.
852 collect_pages_from_single_cpu(struct page_collection
*pc
)
854 struct trace_cpu_data
*tcd
;
857 tcd_for_each(tcd
, i
, j
) {
858 list_splice_init(&tcd
->tcd_pages
, &pc
->pc_pages
);
859 tcd
->tcd_cur_pages
= 0;
864 collect_pages_on_cpu(void *info
)
866 struct trace_cpu_data
*tcd
;
867 struct page_collection
*pc
= info
;
870 spin_lock(&pc
->pc_lock
);
871 tcd_for_each_type_lock(tcd
, i
) {
872 list_splice_init(&tcd
->tcd_pages
, &pc
->pc_pages
);
873 tcd
->tcd_cur_pages
= 0;
875 spin_unlock(&pc
->pc_lock
);
879 collect_pages(dumplog_priv_t
*dp
, struct page_collection
*pc
)
881 INIT_LIST_HEAD(&pc
->pc_pages
);
883 if (spl_panic_in_progress
|| dp
->dp_flags
& DL_SINGLE_CPU
)
884 collect_pages_from_single_cpu(pc
);
886 trace_call_on_all_cpus(collect_pages_on_cpu
, pc
);
890 put_pages_back_on_cpu(void *info
)
892 struct page_collection
*pc
= info
;
893 struct trace_cpu_data
*tcd
;
894 struct list_head
*cur_head
;
895 struct trace_page
*tage
;
896 struct trace_page
*tmp
;
899 spin_lock(&pc
->pc_lock
);
900 tcd_for_each_type_lock(tcd
, i
) {
901 cur_head
= tcd
->tcd_pages
.next
;
903 list_for_each_entry_safe(tage
, tmp
, &pc
->pc_pages
, linkage
) {
905 __ASSERT_TAGE_INVARIANT(tage
);
907 if (tage
->cpu
!= smp_processor_id() || tage
->type
!= i
)
910 tage_to_tail(tage
, cur_head
);
911 tcd
->tcd_cur_pages
++;
914 spin_unlock(&pc
->pc_lock
);
918 put_pages_back(struct page_collection
*pc
)
920 if (!spl_panic_in_progress
)
921 trace_call_on_all_cpus(put_pages_back_on_cpu
, pc
);
925 trace_filp_open (const char *name
, int flags
, int mode
, int *err
)
927 struct file
*filp
= NULL
;
930 filp
= filp_open(name
, flags
, mode
);
933 printk(KERN_ERR
"SPL: Can't open %s file: %d\n", name
, rc
);
941 #define trace_filp_write(fp, b, s, p) (fp)->f_op->write((fp), (b), (s), p)
942 #define trace_filp_fsync(fp) (fp)->f_op->fsync((fp),(fp)->f_dentry,1)
943 #define trace_filp_close(f) filp_close(f, NULL)
944 #define trace_filp_poff(f) (&(f)->f_pos)
947 spl_debug_dump_all_pages(dumplog_priv_t
*dp
, char *filename
)
949 struct page_collection pc
;
951 struct trace_page
*tage
;
952 struct trace_page
*tmp
;
956 down_write(&trace_sem
);
958 filp
= trace_filp_open(filename
, O_CREAT
|O_EXCL
|O_WRONLY
|O_LARGEFILE
,
962 printk(KERN_ERR
"SPL: Can't open %s for dump: %d\n",
967 spin_lock_init(&pc
.pc_lock
);
968 collect_pages(dp
, &pc
);
969 if (list_empty(&pc
.pc_pages
)) {
977 list_for_each_entry_safe(tage
, tmp
, &pc
.pc_pages
, linkage
) {
978 __ASSERT_TAGE_INVARIANT(tage
);
980 rc
= trace_filp_write(filp
, page_address(tage
->page
),
981 tage
->used
, trace_filp_poff(filp
));
982 if (rc
!= (int)tage
->used
) {
983 printk(KERN_WARNING
"SPL: Wanted to write %u "
984 "but wrote %d\n", tage
->used
, rc
);
986 __ASSERT(list_empty(&pc
.pc_pages
));
989 list_del(&tage
->linkage
);
995 rc
= trace_filp_fsync(filp
);
997 printk(KERN_ERR
"SPL: Unable to sync: %d\n", rc
);
999 trace_filp_close(filp
);
1001 up_write(&trace_sem
);
1007 spl_debug_flush_pages(void)
1010 struct page_collection pc
;
1011 struct trace_page
*tage
;
1012 struct trace_page
*tmp
;
1014 spin_lock_init(&pc
.pc_lock
);
1015 init_waitqueue_head(&dp
.dp_waitq
);
1016 dp
.dp_pid
= current
->pid
;
1018 atomic_set(&dp
.dp_done
, 0);
1020 collect_pages(&dp
, &pc
);
1021 list_for_each_entry_safe(tage
, tmp
, &pc
.pc_pages
, linkage
) {
1022 __ASSERT_TAGE_INVARIANT(tage
);
1023 list_del(&tage
->linkage
);
1029 spl_debug_set_mask(unsigned long mask
) {
1030 spl_debug_mask
= mask
;
1033 EXPORT_SYMBOL(spl_debug_set_mask
);
1036 spl_debug_get_mask(void) {
1037 return spl_debug_mask
;
1039 EXPORT_SYMBOL(spl_debug_get_mask
);
1042 spl_debug_set_subsys(unsigned long subsys
) {
1043 spl_debug_subsys
= subsys
;
1046 EXPORT_SYMBOL(spl_debug_set_subsys
);
1049 spl_debug_get_subsys(void) {
1050 return spl_debug_subsys
;
1052 EXPORT_SYMBOL(spl_debug_get_subsys
);
1055 spl_debug_set_mb(int mb
)
1058 int limit
= trace_max_debug_mb();
1059 struct trace_cpu_data
*tcd
;
1061 if (mb
< num_possible_cpus()) {
1062 printk(KERN_ERR
"SPL: Refusing to set debug buffer size to "
1063 "%dMB - lower limit is %d\n", mb
, num_possible_cpus());
1068 printk(KERN_ERR
"SPL: Refusing to set debug buffer size to "
1069 "%dMB - upper limit is %d\n", mb
, limit
);
1073 mb
/= num_possible_cpus();
1074 pages
= mb
<< (20 - PAGE_SHIFT
);
1076 down_write(&trace_sem
);
1078 tcd_for_each(tcd
, i
, j
)
1079 tcd
->tcd_max_pages
= (pages
* tcd
->tcd_pages_factor
) / 100;
1081 up_write(&trace_sem
);
1085 EXPORT_SYMBOL(spl_debug_set_mb
);
1088 spl_debug_get_mb(void)
1091 struct trace_cpu_data
*tcd
;
1092 int total_pages
= 0;
1094 down_read(&trace_sem
);
1096 tcd_for_each(tcd
, i
, j
)
1097 total_pages
+= tcd
->tcd_max_pages
;
1099 up_read(&trace_sem
);
1101 return (total_pages
>> (20 - PAGE_SHIFT
)) + 1;
1103 EXPORT_SYMBOL(spl_debug_get_mb
);
1105 void spl_debug_dumpstack(struct task_struct
*tsk
)
1107 extern void show_task(struct task_struct
*);
1112 printk(KERN_ERR
"SPL: Showing stack for process %d\n", tsk
->pid
);
1115 EXPORT_SYMBOL(spl_debug_dumpstack
);
1117 void spl_debug_bug(char *file
, const char *func
, const int line
, int flags
)
1119 spl_debug_catastrophe
= 1;
1120 spl_debug_msg(NULL
, 0, D_EMERG
, file
, func
, line
, "SBUG\n");
1122 if (in_interrupt()) {
1123 panic("SBUG in interrupt.\n");
1127 /* Ensure all debug pages and dumped by current cpu */
1128 if (spl_debug_panic_on_bug
)
1129 spl_panic_in_progress
= 1;
1131 spl_debug_dumpstack(NULL
);
1132 spl_debug_dumplog(flags
);
1134 if (spl_debug_panic_on_bug
)
1137 set_task_state(current
, TASK_UNINTERRUPTIBLE
);
1141 EXPORT_SYMBOL(spl_debug_bug
);
1144 spl_debug_clear_buffer(void)
1146 spl_debug_flush_pages();
1149 EXPORT_SYMBOL(spl_debug_clear_buffer
);
1152 spl_debug_mark_buffer(char *text
)
1154 CDEBUG(D_WARNING
, "*************************************\n");
1155 CDEBUG(D_WARNING
, "DEBUG MARKER: %s\n", text
);
1156 CDEBUG(D_WARNING
, "*************************************\n");
1160 EXPORT_SYMBOL(spl_debug_mark_buffer
);
1163 trace_init(int max_pages
)
1165 struct trace_cpu_data
*tcd
;
1168 init_rwsem(&trace_sem
);
1170 /* initialize trace_data */
1171 memset(trace_data
, 0, sizeof(trace_data
));
1172 for (i
= 0; i
< TCD_TYPE_MAX
; i
++) {
1173 trace_data
[i
] = kmalloc(sizeof(union trace_data_union
) *
1174 NR_CPUS
, GFP_KERNEL
);
1175 if (trace_data
[i
] == NULL
)
1179 tcd_for_each(tcd
, i
, j
) {
1180 tcd
->tcd_pages_factor
= pages_factor
[i
];
1183 INIT_LIST_HEAD(&tcd
->tcd_pages
);
1184 INIT_LIST_HEAD(&tcd
->tcd_stock_pages
);
1185 tcd
->tcd_cur_pages
= 0;
1186 tcd
->tcd_cur_stock_pages
= 0;
1187 tcd
->tcd_max_pages
= (max_pages
* pages_factor
[i
]) / 100;
1188 tcd
->tcd_shutting_down
= 0;
1191 for (i
= 0; i
< num_possible_cpus(); i
++) {
1192 for (j
= 0; j
< 3; j
++) {
1193 trace_console_buffers
[i
][j
] =
1194 kmalloc(TRACE_CONSOLE_BUFFER_SIZE
,
1197 if (trace_console_buffers
[i
][j
] == NULL
)
1205 printk(KERN_ERR
"SPL: Insufficient memory for debug logs\n");
1212 int rc
, max
= spl_debug_mb
;
1214 spl_console_max_delay
= SPL_DEFAULT_MAX_DELAY
;
1215 spl_console_min_delay
= SPL_DEFAULT_MIN_DELAY
;
1217 /* If spl_debug_mb is set to an invalid value or uninitialized
1218 * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
1219 if (max
> (num_physpages
>> (20 - 2 - PAGE_SHIFT
)) / 5 ||
1220 max
>= 512 || max
< 0) {
1221 max
= TCD_MAX_PAGES
;
1223 max
= (max
/ num_online_cpus()) << (20 - PAGE_SHIFT
);
1226 rc
= trace_init(max
);
1234 trace_cleanup_on_cpu(void *info
)
1236 struct trace_cpu_data
*tcd
;
1237 struct trace_page
*tage
;
1238 struct trace_page
*tmp
;
1241 tcd_for_each_type_lock(tcd
, i
) {
1242 tcd
->tcd_shutting_down
= 1;
1244 list_for_each_entry_safe(tage
, tmp
, &tcd
->tcd_pages
, linkage
) {
1245 __ASSERT_TAGE_INVARIANT(tage
);
1247 list_del(&tage
->linkage
);
1250 tcd
->tcd_cur_pages
= 0;
1259 trace_call_on_all_cpus(trace_cleanup_on_cpu
, NULL
);
1261 for (i
= 0; i
< num_possible_cpus(); i
++) {
1262 for (j
= 0; j
< 3; j
++) {
1263 if (trace_console_buffers
[i
][j
] != NULL
) {
1264 kfree(trace_console_buffers
[i
][j
]);
1265 trace_console_buffers
[i
][j
] = NULL
;
1270 for (i
= 0; trace_data
[i
] != NULL
; i
++) {
1271 kfree(trace_data
[i
]);
1272 trace_data
[i
] = NULL
;