module/spl/spl-debug.c
715f6251 1/*
2 * This file is part of the SPL: Solaris Porting Layer.
57d1b188 3 *
715f6251 4 * This file was originally part of Lustre, http://www.lustre.org,
5 * but has subsequently been adapted for use in the SPL in
6 * accordance with the GPL.
57d1b188 7 *
715f6251 8 * Copyright (C) 2004 Cluster File Systems, Inc.
9 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
10 * Produced at Lawrence Livermore National Laboratory
11 * Written by:
12 * Zach Brown <zab@clusterfs.com>
13 * Phil Schwan <phil@clusterfs.com>
14 * Brian Behlendorf <behlendorf1@llnl.gov>,
15 * Herb Wartens <wartens2@llnl.gov>,
16 * Jim Garlick <garlick@llnl.gov>
17 * UCRL-CODE-235197
57d1b188 18 *
715f6251 19 * This is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
57d1b188 23 *
715f6251 24 * This is distributed in the hope that it will be useful, but WITHOUT
25 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 * for more details.
57d1b188 28 *
715f6251 29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
57d1b188 32 */
33
34#include <linux/kmod.h>
35#include <linux/mm.h>
36#include <linux/vmalloc.h>
37#include <linux/pagemap.h>
38#include <linux/slab.h>
39#include <linux/ctype.h>
40#include <linux/kthread.h>
41#include <linux/hardirq.h>
42#include <linux/interrupt.h>
f6c81c5e 43#include <linux/spinlock.h>
57d1b188 44#include <sys/sysmacros.h>
45#include <sys/proc.h>
46#include <sys/debug.h>
47#include <spl-ctl.h>
57d1b188 48
49#ifdef DEBUG_SUBSYSTEM
50#undef DEBUG_SUBSYSTEM
51#endif
52
53#define DEBUG_SUBSYSTEM S_DEBUG
54
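/*
 * Most of the tunables below are exported as writable (0644) module
 * parameters, so the subsystem and message-level filtering can be set at
 * module load time or, where the kernel exposes module parameters through
 * sysfs, adjusted at runtime.
 */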
55unsigned long spl_debug_subsys = ~0;
56EXPORT_SYMBOL(spl_debug_subsys);
57module_param(spl_debug_subsys, long, 0644);
58MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
59
60unsigned long spl_debug_mask = (D_EMERG | D_ERROR | D_WARNING | D_CONSOLE);
61EXPORT_SYMBOL(spl_debug_mask);
62module_param(spl_debug_mask, long, 0644);
63MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
64
65unsigned long spl_debug_printk = D_CANTMASK;
66EXPORT_SYMBOL(spl_debug_printk);
67module_param(spl_debug_printk, long, 0644);
68MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
69
70int spl_debug_mb = -1;
71EXPORT_SYMBOL(spl_debug_mb);
72module_param(spl_debug_mb, int, 0644);
73MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
74
75unsigned int spl_debug_binary = 1;
76EXPORT_SYMBOL(spl_debug_binary);
77
78unsigned int spl_debug_catastrophe;
79EXPORT_SYMBOL(spl_debug_catastrophe);
80
81unsigned int spl_debug_panic_on_bug = 1;
82EXPORT_SYMBOL(spl_debug_panic_on_bug);
83module_param(spl_debug_panic_on_bug, int, 0644);
84MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");
85
86static char spl_debug_file_name[PATH_MAX];
87char spl_debug_file_path[PATH_MAX] = "/var/dumps/spl-log";
88
89unsigned int spl_console_ratelimit = 1;
90EXPORT_SYMBOL(spl_console_ratelimit);
91
92long spl_console_max_delay;
93EXPORT_SYMBOL(spl_console_max_delay);
94
95long spl_console_min_delay;
96EXPORT_SYMBOL(spl_console_min_delay);
97
98unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
99EXPORT_SYMBOL(spl_console_backoff);
100
101unsigned int spl_debug_stack;
102EXPORT_SYMBOL(spl_debug_stack);
103
104static int spl_panic_in_progress;
105
106union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
107char *trace_console_buffers[NR_CPUS][3];
108struct rw_semaphore trace_sem;
109atomic_t trace_tage_allocated = ATOMIC_INIT(0);
110
7fea96c0 111static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
57d1b188 112static void trace_fini(void);
113
114
115/* Memory percentage breakdown by type */
116static unsigned int pages_factor[TCD_TYPE_MAX] = {
117 80, /* 80% pages for TCD_TYPE_PROC */
118 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
119 10 /* 10% pages for TCD_TYPE_IRQ */
120};
121
57d1b188 122const char *
123spl_debug_subsys2str(int subsys)
124{
125 switch (subsys) {
126 default:
127 return NULL;
128 case S_UNDEFINED:
129 return "undefined";
130 case S_ATOMIC:
131 return "atomic";
132 case S_KOBJ:
133 return "kobj";
134 case S_VNODE:
135 return "vnode";
136 case S_TIME:
137 return "time";
138 case S_RWLOCK:
139 return "rwlock";
140 case S_THREAD:
141 return "thread";
142 case S_CONDVAR:
143 return "condvar";
144 case S_MUTEX:
145 return "mutex";
146 case S_RNG:
147 return "rng";
148 case S_TASKQ:
149 return "taskq";
150 case S_KMEM:
151 return "kmem";
e5bbd245 152 case S_DEBUG:
153 return "debug";
154 case S_GENERIC:
155 return "generic";
156 case S_PROC:
157 return "proc";
158 case S_MODULE:
159 return "module";
ec7d53e9 160 case S_CRED:
161 return "cred";
57d1b188 162 }
163}
164
165const char *
166spl_debug_dbg2str(int debug)
167{
168 switch (debug) {
169 default:
170 return NULL;
171 case D_TRACE:
172 return "trace";
173 case D_INFO:
174 return "info";
175 case D_WARNING:
176 return "warning";
177 case D_ERROR:
178 return "error";
179 case D_EMERG:
180 return "emerg";
181 case D_CONSOLE:
182 return "console";
183 case D_IOCTL:
184 return "ioctl";
185 case D_DPRINTF:
186 return "dprintf";
187 case D_OTHER:
188 return "other";
189 }
190}
191
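/*
 * Render 'mask' as a space-separated list of token names into 'str'.
 * Returns the number of characters the full expansion requires (excluding
 * the terminator), which may exceed 'size'; the string written to 'str'
 * is truncated and NUL terminated to fit.
 */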
192int
193spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
194{
195 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
196 spl_debug_dbg2str;
197 const char *token;
198 int i, bit, len = 0;
199
200 if (mask == 0) { /* "0" */
201 if (size > 0)
202 str[0] = '0';
203 len = 1;
204 } else { /* space-separated tokens */
205 for (i = 0; i < 32; i++) {
206 bit = 1 << i;
207
208 if ((mask & bit) == 0)
209 continue;
210
211 token = fn(bit);
212 if (token == NULL) /* unused bit */
213 continue;
214
215 if (len > 0) { /* separator? */
216 if (len < size)
217 str[len] = ' ';
218 len++;
219 }
220
221 while (*token != 0) {
222 if (len < size)
223 str[len] = *token;
224 token++;
225 len++;
226 }
227 }
228 }
229
230 /* terminate 'str' */
231 if (len < size)
232 str[len] = 0;
233 else
234 str[size - 1] = 0;
235
236 return len;
237}
238
239static int
240spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
241{
242 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
243 spl_debug_dbg2str;
244 const char *token;
245 int i, j, bit;
246
247 /* match against known tokens */
248 for (i = 0; i < 32; i++) {
249 bit = 1 << i;
250
251 token = fn(bit);
252 if (token == NULL) /* unused? */
253 continue;
254
255 /* strcasecmp */
256 for (j = 0; ; j++) {
257 if (j == len) { /* end of token */
258 if (token[j] == 0) {
259 *mask = bit;
260 return 0;
261 }
262 break;
263 }
264
265 if (token[j] == 0)
266 break;
267
268 if (str[j] == token[j])
269 continue;
270
271 if (str[j] < 'A' || 'Z' < str[j])
272 break;
273
274 if (str[j] - 'A' + 'a' != token[j])
275 break;
276 }
277 }
278
279 return -EINVAL; /* no match */
280}
281
282int
283spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
284{
285 char op = 0;
286 int m = 0, matched, n, t;
287
288 /* Allow a number for backwards compatibility */
289 for (n = strlen(str); n > 0; n--)
290 if (!isspace(str[n-1]))
291 break;
292 matched = n;
293
294 if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
295 *mask = m;
296 return 0;
297 }
298
299 /* <str> must be a list of debug tokens or numbers separated by
300 * whitespace and optionally an operator ('+' or '-'). If an operator
301 * appears first in <str>, '*mask' is used as the starting point
302 * (relative), otherwise 0 is used (absolute). An operator applies to
303 * all following tokens up to the next operator. */
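	/* For example, "kmem taskq" selects exactly those bits (absolute),
	 * while "+kmem -rwlock" starts from the current '*mask', sets the
	 * kmem bit and clears the rwlock bit (relative). */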
304 matched = 0;
305 while (*str != 0) {
306 while (isspace(*str)) /* skip whitespace */
307 str++;
308
309 if (*str == 0)
310 break;
311
312 if (*str == '+' || *str == '-') {
313 op = *str++;
314
315 /* op on first token == relative */
316 if (!matched)
317 m = *mask;
318
319 while (isspace(*str)) /* skip whitespace */
320 str++;
321
322 if (*str == 0) /* trailing op */
323 return -EINVAL;
324 }
325
326 /* find token length */
327 for (n = 0; str[n] != 0 && !isspace(str[n]); n++);
328
329 /* match token */
330 if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
331 return -EINVAL;
332
333 matched = 1;
334 if (op == '-')
335 m &= ~t;
336 else
337 m |= t;
338
339 str += n;
340 }
341
342 if (!matched)
343 return -EINVAL;
344
345 *mask = m;
346 return 0;
347}
348
57d1b188 349static void
350spl_debug_dumplog_internal(dumplog_priv_t *dp)
351{
352 void *journal_info;
353
354 journal_info = current->journal_info;
355 current->journal_info = NULL;
356
 357 snprintf(spl_debug_file_name, sizeof(spl_debug_file_name) - 1,
358 "%s.%ld.%ld", spl_debug_file_path,
359 get_seconds(), (long)dp->dp_pid);
360 printk(KERN_ALERT "SPL: dumping log to %s\n", spl_debug_file_name);
7fea96c0 361 spl_debug_dump_all_pages(dp, spl_debug_file_name);
57d1b188 362
363 current->journal_info = journal_info;
364}
365
366static int
367spl_debug_dumplog_thread(void *arg)
368{
369 dumplog_priv_t *dp = (dumplog_priv_t *)arg;
370
371 spl_debug_dumplog_internal(dp);
7fea96c0 372 atomic_set(&dp->dp_done, 1);
57d1b188 373 wake_up(&dp->dp_waitq);
3d061e9d 374 complete_and_exit(NULL, 0);
57d1b188 375
376 return 0; /* Unreachable */
377}
378
7fea96c0 379 /* When the DL_NOTHREAD flag is set, do not use a new thread for the debug dump */
57d1b188 380int
7fea96c0 381spl_debug_dumplog(int flags)
57d1b188 382{
383 struct task_struct *tsk;
384 dumplog_priv_t dp;
57d1b188 385
7fea96c0 386 init_waitqueue_head(&dp.dp_waitq);
387 dp.dp_pid = current->pid;
388 dp.dp_flags = flags;
389 atomic_set(&dp.dp_done, 0);
57d1b188 390
7fea96c0 391 if (dp.dp_flags & DL_NOTHREAD) {
392 spl_debug_dumplog_internal(&dp);
393 } else {
57d1b188 394
7fea96c0 395 tsk = kthread_create(spl_debug_dumplog_thread, (void *)&dp, "spl_debug");
 396 if (IS_ERR(tsk))
 397 return PTR_ERR(tsk);
398
399 wake_up_process(tsk);
400 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
401 }
57d1b188 402
a8ac0b89 403 return 0;
57d1b188 404}
405EXPORT_SYMBOL(spl_debug_dumplog);
406
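/*
 * Per-CPU console staging buffers: index 0 is used from hard interrupt
 * context, 1 from softirq context and 2 from process context. get_cpu()
 * pins the caller to the CPU until trace_put_console_buffer() is called.
 */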
407static char *
408trace_get_console_buffer(void)
409{
410 int cpu = get_cpu();
411 int idx;
412
413 if (in_irq()) {
414 idx = 0;
415 } else if (in_softirq()) {
416 idx = 1;
417 } else {
418 idx = 2;
419 }
420
421 return trace_console_buffers[cpu][idx];
422}
423
424static void
425trace_put_console_buffer(char *buffer)
426{
427 put_cpu();
428}
429
f6c81c5e 430static int
431trace_lock_tcd(struct trace_cpu_data *tcd)
57d1b188 432{
f6c81c5e 433 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
57d1b188 434
f6c81c5e 435 spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
57d1b188 436
f6c81c5e 437 return 1;
57d1b188 438}
439
440static void
f6c81c5e 441trace_unlock_tcd(struct trace_cpu_data *tcd)
57d1b188 442{
f6c81c5e 443 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
444
445 spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
57d1b188 446}
447
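/*
 * Return the trace_cpu_data ring for the current CPU and execution
 * context with its spinlock held and preemption disabled. Callers must
 * release it with trace_put_tcd().
 */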
f6c81c5e 448static struct trace_cpu_data *
449trace_get_tcd(void)
57d1b188 450{
f6c81c5e 451 int cpu;
452 struct trace_cpu_data *tcd;
453
454 cpu = get_cpu();
455 if (in_irq())
456 tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
457 else if (in_softirq())
458 tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
459 else
460 tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
57d1b188 461
f6c81c5e 462 trace_lock_tcd(tcd);
57d1b188 463
f6c81c5e 464 return tcd;
57d1b188 465}
466
467static void
f6c81c5e 468trace_put_tcd (struct trace_cpu_data *tcd)
57d1b188 469{
f6c81c5e 470 trace_unlock_tcd(tcd);
57d1b188 471
f6c81c5e 472 put_cpu();
57d1b188 473}
474
475static void
476trace_set_debug_header(struct spl_debug_header *header, int subsys,
477 int mask, const int line, unsigned long stack)
478{
479 struct timeval tv;
480
481 do_gettimeofday(&tv);
482
483 header->ph_subsys = subsys;
484 header->ph_mask = mask;
485 header->ph_cpu_id = smp_processor_id();
486 header->ph_sec = (__u32)tv.tv_sec;
487 header->ph_usec = tv.tv_usec;
488 header->ph_stack = stack;
489 header->ph_pid = current->pid;
490 header->ph_line_num = line;
491
492 return;
493}
494
495static void
496trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
497 int len, const char *file, const char *fn)
498{
499 char *prefix = "SPL", *ptype = NULL;
500
501 if ((mask & D_EMERG) != 0) {
502 prefix = "SPLError";
503 ptype = KERN_EMERG;
504 } else if ((mask & D_ERROR) != 0) {
505 prefix = "SPLError";
506 ptype = KERN_ERR;
507 } else if ((mask & D_WARNING) != 0) {
508 prefix = "SPL";
509 ptype = KERN_WARNING;
510 } else if ((mask & (D_CONSOLE | spl_debug_printk)) != 0) {
511 prefix = "SPL";
512 ptype = KERN_INFO;
513 }
514
515 if ((mask & D_CONSOLE) != 0) {
516 printk("%s%s: %.*s", ptype, prefix, len, buf);
517 } else {
892d5106 518 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
519 hdr->ph_pid, hdr->ph_stack, file,
520 hdr->ph_line_num, fn, len, buf);
57d1b188 521 }
522
523 return;
524}
525
526static int
527trace_max_debug_mb(void)
528{
529 return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
530}
531
57d1b188 532static struct trace_page *
533tage_alloc(int gfp)
534{
535 struct page *page;
536 struct trace_page *tage;
537
538 page = alloc_pages(gfp | __GFP_NOWARN, 0);
539 if (page == NULL)
540 return NULL;
541
542 tage = kmalloc(sizeof(*tage), gfp);
543 if (tage == NULL) {
544 __free_pages(page, 0);
545 return NULL;
546 }
547
548 tage->page = page;
549 atomic_inc(&trace_tage_allocated);
550
551 return tage;
552}
553
554static void
555tage_free(struct trace_page *tage)
556{
557 __ASSERT(tage != NULL);
558 __ASSERT(tage->page != NULL);
559
560 __free_pages(tage->page, 0);
561 kfree(tage);
562 atomic_dec(&trace_tage_allocated);
563}
564
565static struct trace_page *
566tage_from_list(struct list_head *list)
567{
568 return list_entry(list, struct trace_page, linkage);
569}
570
571static void
572tage_to_tail(struct trace_page *tage, struct list_head *queue)
573{
574 __ASSERT(tage != NULL);
575 __ASSERT(queue != NULL);
576
577 list_move_tail(&tage->linkage, queue);
578}
579
580/* try to return a page that has 'len' bytes left at the end */
581static struct trace_page *
582trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
583{
584 struct trace_page *tage;
585
586 if (tcd->tcd_cur_pages > 0) {
587 __ASSERT(!list_empty(&tcd->tcd_pages));
588 tage = tage_from_list(tcd->tcd_pages.prev);
589 if (tage->used + len <= PAGE_SIZE)
590 return tage;
591 }
592
593 if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
594 if (tcd->tcd_cur_stock_pages > 0) {
595 tage = tage_from_list(tcd->tcd_stock_pages.prev);
596 tcd->tcd_cur_stock_pages--;
597 list_del_init(&tage->linkage);
598 } else {
599 tage = tage_alloc(GFP_ATOMIC);
600 if (tage == NULL) {
601 printk(KERN_WARNING
602 "failure to allocate a tage (%ld)\n",
603 tcd->tcd_cur_pages);
604 return NULL;
605 }
606 }
607
608 tage->used = 0;
609 tage->cpu = smp_processor_id();
610 tage->type = tcd->tcd_type;
611 list_add_tail(&tage->linkage, &tcd->tcd_pages);
612 tcd->tcd_cur_pages++;
613
614 return tage;
615 }
616
617 return NULL;
618}
619
620/* return a page that has 'len' bytes left at the end */
621static struct trace_page *
622trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
623{
624 struct trace_page *tage;
625
626 __ASSERT(len <= PAGE_SIZE);
627
628 tage = trace_get_tage_try(tcd, len);
629 if (tage)
630 return tage;
631
632 if (tcd->tcd_cur_pages > 0) {
633 tage = tage_from_list(tcd->tcd_pages.next);
634 tage->used = 0;
635 tage_to_tail(tage, &tcd->tcd_pages);
636 }
637
638 return tage;
639}
640
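/*
 * Core message formatting routine. The message (format1 rendered from
 * 'args', optionally followed by format2 rendered from the trailing
 * varargs) is appended to the current CPU's trace pages and, when the
 * mask matches spl_debug_printk, echoed to the console subject to the
 * 'cdls' rate limiting state. Returns 0 when a console message was
 * emitted and 1 when console output was filtered or rate limited.
 */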
641int
642spl_debug_vmsg(spl_debug_limit_state_t *cdls, int subsys, int mask,
643 const char *file, const char *fn, const int line,
644 const char *format1, va_list args, const char *format2, ...)
645{
646 struct trace_cpu_data *tcd = NULL;
b02e9b24 647 struct spl_debug_header header = { 0, };
57d1b188 648 struct trace_page *tage;
649 /* string_buf is used only if tcd != NULL, and is always set then */
650 char *string_buf = NULL;
651 char *debug_buf;
652 int known_size;
653 int needed = 85; /* average message length */
654 int max_nob;
655 va_list ap;
656 int i;
657 int remain;
658
659 if (strchr(file, '/'))
660 file = strrchr(file, '/') + 1;
661
662 trace_set_debug_header(&header, subsys, mask, line, CDEBUG_STACK());
663
664 tcd = trace_get_tcd();
665 if (tcd == NULL)
666 goto console;
667
668 if (tcd->tcd_shutting_down) {
669 trace_put_tcd(tcd);
670 tcd = NULL;
671 goto console;
672 }
673
674 known_size = strlen(file) + 1;
675 if (fn)
676 known_size += strlen(fn) + 1;
677
678 if (spl_debug_binary)
679 known_size += sizeof(header);
680
 681 /* Loop at most twice: vsnprintf() returns the size required for output
 682 * _without_ the terminating NUL, so a larger page may be needed. */
683 for (i = 0; i < 2; i++) {
684 tage = trace_get_tage(tcd, needed + known_size + 1);
685 if (tage == NULL) {
686 if (needed + known_size > PAGE_SIZE)
687 mask |= D_ERROR;
688
689 trace_put_tcd(tcd);
690 tcd = NULL;
691 goto console;
692 }
693
694 string_buf = (char *)page_address(tage->page) +
695 tage->used + known_size;
696
697 max_nob = PAGE_SIZE - tage->used - known_size;
698 if (max_nob <= 0) {
699 printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
700 mask |= D_ERROR;
701 trace_put_tcd(tcd);
702 tcd = NULL;
703 goto console;
704 }
705
706 needed = 0;
707 if (format1) {
708 va_copy(ap, args);
709 needed = vsnprintf(string_buf, max_nob, format1, ap);
710 va_end(ap);
711 }
712
713 if (format2) {
714 remain = max_nob - needed;
715 if (remain < 0)
716 remain = 0;
717
718 va_start(ap, format2);
719 needed += vsnprintf(string_buf+needed, remain, format2, ap);
720 va_end(ap);
721 }
722
723 if (needed < max_nob)
724 break;
725 }
726
57d1b188 727 header.ph_len = known_size + needed;
728 debug_buf = (char *)page_address(tage->page) + tage->used;
729
730 if (spl_debug_binary) {
731 memcpy(debug_buf, &header, sizeof(header));
732 tage->used += sizeof(header);
733 debug_buf += sizeof(header);
734 }
735
736 strcpy(debug_buf, file);
737 tage->used += strlen(file) + 1;
738 debug_buf += strlen(file) + 1;
739
740 if (fn) {
741 strcpy(debug_buf, fn);
742 tage->used += strlen(fn) + 1;
743 debug_buf += strlen(fn) + 1;
744 }
745
746 __ASSERT(debug_buf == string_buf);
747
748 tage->used += needed;
749 __ASSERT (tage->used <= PAGE_SIZE);
750
751console:
752 if ((mask & spl_debug_printk) == 0) {
753 /* no console output requested */
754 if (tcd != NULL)
755 trace_put_tcd(tcd);
756 return 1;
757 }
758
759 if (cdls != NULL) {
760 if (spl_console_ratelimit && cdls->cdls_next != 0 &&
761 !time_before(cdls->cdls_next, jiffies)) {
762 /* skipping a console message */
763 cdls->cdls_count++;
764 if (tcd != NULL)
765 trace_put_tcd(tcd);
766 return 1;
767 }
768
769 if (time_before(cdls->cdls_next + spl_console_max_delay +
770 (10 * HZ), jiffies)) {
771 /* last timeout was a long time ago */
772 cdls->cdls_delay /= spl_console_backoff * 4;
773 } else {
774 cdls->cdls_delay *= spl_console_backoff;
775
776 if (cdls->cdls_delay < spl_console_min_delay)
777 cdls->cdls_delay = spl_console_min_delay;
778 else if (cdls->cdls_delay > spl_console_max_delay)
779 cdls->cdls_delay = spl_console_max_delay;
780 }
781
782 /* ensure cdls_next is never zero after it's been seen */
783 cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
784 }
785
786 if (tcd != NULL) {
787 trace_print_to_console(&header, mask, string_buf, needed, file, fn);
788 trace_put_tcd(tcd);
789 } else {
790 string_buf = trace_get_console_buffer();
791
792 needed = 0;
793 if (format1 != NULL) {
794 va_copy(ap, args);
795 needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
796 va_end(ap);
797 }
798 if (format2 != NULL) {
799 remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
800 if (remain > 0) {
801 va_start(ap, format2);
802 needed += vsnprintf(string_buf+needed, remain, format2, ap);
803 va_end(ap);
804 }
805 }
806 trace_print_to_console(&header, mask,
807 string_buf, needed, file, fn);
808
809 trace_put_console_buffer(string_buf);
810 }
811
812 if (cdls != NULL && cdls->cdls_count != 0) {
813 string_buf = trace_get_console_buffer();
814
815 needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
816 "Skipped %d previous similar message%s\n",
817 cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
818
819 trace_print_to_console(&header, mask,
820 string_buf, needed, file, fn);
821
822 trace_put_console_buffer(string_buf);
823 cdls->cdls_count = 0;
824 }
825
826 return 0;
827}
828EXPORT_SYMBOL(spl_debug_vmsg);
829
830/* Do the collect_pages job on a single CPU: assumes that all other
831 * CPUs have been stopped during a panic. If this isn't true for
832 * some arch, this will have to be implemented separately in each arch.
833 */
834static void
7fea96c0 835collect_pages_from_single_cpu(struct page_collection *pc)
57d1b188 836{
837 struct trace_cpu_data *tcd;
838 int i, j;
839
840 tcd_for_each(tcd, i, j) {
841 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
842 tcd->tcd_cur_pages = 0;
843 }
844}
845
846static void
f6c81c5e 847collect_pages_on_all_cpus(struct page_collection *pc)
57d1b188 848{
849 struct trace_cpu_data *tcd;
f6c81c5e 850 int i, cpu;
57d1b188 851
852 spin_lock(&pc->pc_lock);
f6c81c5e 853 for_each_possible_cpu(cpu) {
854 tcd_for_each_type_lock(tcd, i, cpu) {
855 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
856 tcd->tcd_cur_pages = 0;
857 }
57d1b188 858 }
859 spin_unlock(&pc->pc_lock);
860}
861
862static void
7fea96c0 863collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
57d1b188 864{
865 INIT_LIST_HEAD(&pc->pc_pages);
866
7fea96c0 867 if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
868 collect_pages_from_single_cpu(pc);
57d1b188 869 else
f6c81c5e 870 collect_pages_on_all_cpus(pc);
57d1b188 871}
872
873static void
f6c81c5e 874put_pages_back_on_all_cpus(struct page_collection *pc)
57d1b188 875{
57d1b188 876 struct trace_cpu_data *tcd;
877 struct list_head *cur_head;
878 struct trace_page *tage;
879 struct trace_page *tmp;
f6c81c5e 880 int i, cpu;
57d1b188 881
882 spin_lock(&pc->pc_lock);
57d1b188 883
f6c81c5e 884 for_each_possible_cpu(cpu) {
885 tcd_for_each_type_lock(tcd, i, cpu) {
886 cur_head = tcd->tcd_pages.next;
57d1b188 887
f6c81c5e 888 list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
889 linkage) {
57d1b188 890
f6c81c5e 891 __ASSERT_TAGE_INVARIANT(tage);
892
893 if (tage->cpu != cpu || tage->type != i)
894 continue;
57d1b188 895
f6c81c5e 896 tage_to_tail(tage, cur_head);
897 tcd->tcd_cur_pages++;
898 }
57d1b188 899 }
900 }
f6c81c5e 901
57d1b188 902 spin_unlock(&pc->pc_lock);
903}
904
905static void
906put_pages_back(struct page_collection *pc)
907{
908 if (!spl_panic_in_progress)
f6c81c5e 909 put_pages_back_on_all_cpus(pc);
57d1b188 910}
911
912static struct file *
913trace_filp_open (const char *name, int flags, int mode, int *err)
914{
915 struct file *filp = NULL;
916 int rc;
917
918 filp = filp_open(name, flags, mode);
919 if (IS_ERR(filp)) {
920 rc = PTR_ERR(filp);
921 printk(KERN_ERR "SPL: Can't open %s file: %d\n", name, rc);
922 if (err)
923 *err = rc;
924 filp = NULL;
925 }
926 return filp;
927}
928
929#define trace_filp_write(fp, b, s, p) (fp)->f_op->write((fp), (b), (s), p)
930#define trace_filp_fsync(fp) (fp)->f_op->fsync((fp),(fp)->f_dentry,1)
931#define trace_filp_close(f) filp_close(f, NULL)
932#define trace_filp_poff(f) (&(f)->f_pos)
933
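/*
 * Collect the trace pages from every CPU (or only the local CPU when
 * panicking or when DL_SINGLE_CPU is set) and write them to 'filename'.
 * On a short write the unwritten pages are returned to their per-CPU
 * lists.
 */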
934static int
7fea96c0 935spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
57d1b188 936{
937 struct page_collection pc;
938 struct file *filp;
939 struct trace_page *tage;
940 struct trace_page *tmp;
941 mm_segment_t oldfs;
942 int rc = 0;
943
944 down_write(&trace_sem);
945
946 filp = trace_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
947 0600, &rc);
948 if (filp == NULL) {
949 if (rc != -EEXIST)
950 printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
951 filename, rc);
952 goto out;
953 }
954
955 spin_lock_init(&pc.pc_lock);
7fea96c0 956 collect_pages(dp, &pc);
57d1b188 957 if (list_empty(&pc.pc_pages)) {
958 rc = 0;
959 goto close;
960 }
961
962 oldfs = get_fs();
963 set_fs(get_ds());
964
965 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
966 __ASSERT_TAGE_INVARIANT(tage);
967
968 rc = trace_filp_write(filp, page_address(tage->page),
969 tage->used, trace_filp_poff(filp));
970 if (rc != (int)tage->used) {
971 printk(KERN_WARNING "SPL: Wanted to write %u "
972 "but wrote %d\n", tage->used, rc);
973 put_pages_back(&pc);
974 __ASSERT(list_empty(&pc.pc_pages));
975 break;
976 }
977 list_del(&tage->linkage);
978 tage_free(tage);
979 }
980
981 set_fs(oldfs);
982
983 rc = trace_filp_fsync(filp);
984 if (rc)
985 printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
986 close:
987 trace_filp_close(filp);
988 out:
989 up_write(&trace_sem);
990
991 return rc;
992}
993
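/*
 * Discard all buffered trace pages without writing them anywhere; used
 * to clear the in-memory debug log.
 */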
994static void
995spl_debug_flush_pages(void)
996{
7fea96c0 997 dumplog_priv_t dp;
57d1b188 998 struct page_collection pc;
999 struct trace_page *tage;
1000 struct trace_page *tmp;
1001
1002 spin_lock_init(&pc.pc_lock);
7fea96c0 1003 init_waitqueue_head(&dp.dp_waitq);
1004 dp.dp_pid = current->pid;
1005 dp.dp_flags = 0;
1006 atomic_set(&dp.dp_done, 0);
57d1b188 1007
7fea96c0 1008 collect_pages(&dp, &pc);
57d1b188 1009 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1010 __ASSERT_TAGE_INVARIANT(tage);
1011 list_del(&tage->linkage);
1012 tage_free(tage);
1013 }
1014}
1015
1016unsigned long
1017spl_debug_set_mask(unsigned long mask) {
1018 spl_debug_mask = mask;
1019 return 0;
1020}
1021EXPORT_SYMBOL(spl_debug_set_mask);
1022
1023unsigned long
1024spl_debug_get_mask(void) {
1025 return spl_debug_mask;
1026}
1027EXPORT_SYMBOL(spl_debug_get_mask);
1028
1029unsigned long
1030spl_debug_set_subsys(unsigned long subsys) {
1031 spl_debug_subsys = subsys;
1032 return 0;
1033}
1034EXPORT_SYMBOL(spl_debug_set_subsys);
1035
1036unsigned long
1037spl_debug_get_subsys(void) {
1038 return spl_debug_subsys;
1039}
1040EXPORT_SYMBOL(spl_debug_get_subsys);
1041
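/*
 * Resize the debug buffers. The requested size in megabytes is divided
 * evenly across the possible CPUs and then split between the process,
 * softirq and irq rings according to each ring's pages_factor.
 */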
1042int
1043spl_debug_set_mb(int mb)
1044{
1045 int i, j, pages;
1046 int limit = trace_max_debug_mb();
1047 struct trace_cpu_data *tcd;
1048
1049 if (mb < num_possible_cpus()) {
1050 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1051 "%dMB - lower limit is %d\n", mb, num_possible_cpus());
1052 return -EINVAL;
1053 }
1054
1055 if (mb > limit) {
1056 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1057 "%dMB - upper limit is %d\n", mb, limit);
1058 return -EINVAL;
1059 }
1060
1061 mb /= num_possible_cpus();
1062 pages = mb << (20 - PAGE_SHIFT);
1063
1064 down_write(&trace_sem);
1065
1066 tcd_for_each(tcd, i, j)
1067 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
1068
1069 up_write(&trace_sem);
1070
1071 return 0;
1072}
1073EXPORT_SYMBOL(spl_debug_set_mb);
1074
1075int
1076spl_debug_get_mb(void)
1077{
1078 int i, j;
1079 struct trace_cpu_data *tcd;
1080 int total_pages = 0;
1081
1082 down_read(&trace_sem);
1083
1084 tcd_for_each(tcd, i, j)
1085 total_pages += tcd->tcd_max_pages;
1086
1087 up_read(&trace_sem);
1088
1089 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1090}
1091EXPORT_SYMBOL(spl_debug_get_mb);
1092
1093void spl_debug_dumpstack(struct task_struct *tsk)
1094{
1095 extern void show_task(struct task_struct *);
1096
1097 if (tsk == NULL)
1098 tsk = current;
1099
892d5106 1100 printk(KERN_ERR "SPL: Showing stack for process %d\n", tsk->pid);
57d86234 1101 dump_stack();
57d1b188 1102}
1103EXPORT_SYMBOL(spl_debug_dumpstack);
1104
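/*
 * Assertion/SBUG handler: log an emergency message, dump the stack and
 * the debug log (synchronously when called from atomic context), then
 * panic if spl_debug_panic_on_bug is set; otherwise park the task in an
 * uninterruptible sleep so the failure site is preserved.
 */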
7fea96c0 1105void spl_debug_bug(char *file, const char *func, const int line, int flags)
57d1b188 1106{
1107 spl_debug_catastrophe = 1;
937879f1 1108 spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SBUG\n");
57d1b188 1109
1110 if (in_interrupt()) {
937879f1 1111 panic("SBUG in interrupt.\n");
57d1b188 1112 /* not reached */
1113 }
1114
97f8f6d7 1115 if (in_atomic() || irqs_disabled())
1116 flags |= DL_NOTHREAD;
1117
57d1b188 1118 /* Ensure all debug pages are dumped by the current cpu */
1119 if (spl_debug_panic_on_bug)
1120 spl_panic_in_progress = 1;
1121
1122 spl_debug_dumpstack(NULL);
7fea96c0 1123 spl_debug_dumplog(flags);
57d1b188 1124
1125 if (spl_debug_panic_on_bug)
937879f1 1126 panic("SBUG");
57d1b188 1127
1128 set_task_state(current, TASK_UNINTERRUPTIBLE);
1129 while (1)
1130 schedule();
1131}
1132EXPORT_SYMBOL(spl_debug_bug);
1133
1134int
1135spl_debug_clear_buffer(void)
1136{
1137 spl_debug_flush_pages();
1138 return 0;
1139}
1140EXPORT_SYMBOL(spl_debug_clear_buffer);
1141
1142int
1143spl_debug_mark_buffer(char *text)
1144{
1145 CDEBUG(D_WARNING, "*************************************\n");
1146 CDEBUG(D_WARNING, "DEBUG MARKER: %s\n", text);
1147 CDEBUG(D_WARNING, "*************************************\n");
1148
1149 return 0;
1150}
1151EXPORT_SYMBOL(spl_debug_mark_buffer);
1152
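/*
 * Allocate the per-CPU trace data for each context type and the per-CPU
 * console buffers. 'max_pages' is the per-CPU page budget, divided among
 * the process/softirq/irq rings by pages_factor[].
 */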
57d1b188 1153static int
1154trace_init(int max_pages)
1155{
1156 struct trace_cpu_data *tcd;
1157 int i, j;
1158
1159 init_rwsem(&trace_sem);
1160
1161 /* initialize trace_data */
1162 memset(trace_data, 0, sizeof(trace_data));
1163 for (i = 0; i < TCD_TYPE_MAX; i++) {
1164 trace_data[i] = kmalloc(sizeof(union trace_data_union) *
1165 NR_CPUS, GFP_KERNEL);
1166 if (trace_data[i] == NULL)
1167 goto out;
1168 }
1169
1170 tcd_for_each(tcd, i, j) {
f6c81c5e 1171 spin_lock_init(&tcd->tcd_lock);
57d1b188 1172 tcd->tcd_pages_factor = pages_factor[i];
1173 tcd->tcd_type = i;
1174 tcd->tcd_cpu = j;
1175 INIT_LIST_HEAD(&tcd->tcd_pages);
1176 INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1177 tcd->tcd_cur_pages = 0;
1178 tcd->tcd_cur_stock_pages = 0;
1179 tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
1180 tcd->tcd_shutting_down = 0;
1181 }
1182
1183 for (i = 0; i < num_possible_cpus(); i++) {
1184 for (j = 0; j < 3; j++) {
1185 trace_console_buffers[i][j] =
1186 kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
1187 GFP_KERNEL);
1188
1189 if (trace_console_buffers[i][j] == NULL)
1190 goto out;
1191 }
1192 }
1193
1194 return 0;
1195out:
1196 trace_fini();
1197 printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
1198 return -ENOMEM;
1199}
1200
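/*
 * Module initialization: set the console rate limit defaults and size the
 * trace buffers from the spl_debug_mb module parameter, falling back to
 * TCD_MAX_PAGES pages per CPU when it is unset or out of range.
 */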
1201int
1202debug_init(void)
1203{
1204 int rc, max = spl_debug_mb;
1205
1206 spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
1207 spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
1208
 1209 /* If spl_debug_mb is set to an invalid value or left uninitialized
 1210 * then default each CPU's buffer to TCD_MAX_PAGES pages. */
1211 if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
1212 max >= 512 || max < 0) {
1213 max = TCD_MAX_PAGES;
1214 } else {
1215 max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
1216 }
1217
1218 rc = trace_init(max);
1219 if (rc)
1220 return rc;
1221
d50bd9e2 1222 return rc;
57d1b188 1223}
1224
1225static void
f6c81c5e 1226trace_cleanup_on_all_cpus(void)
57d1b188 1227{
1228 struct trace_cpu_data *tcd;
1229 struct trace_page *tage;
1230 struct trace_page *tmp;
f6c81c5e 1231 int i, cpu;
57d1b188 1232
f6c81c5e 1233 for_each_possible_cpu(cpu) {
1234 tcd_for_each_type_lock(tcd, i, cpu) {
1235 tcd->tcd_shutting_down = 1;
57d1b188 1236
f6c81c5e 1237 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
1238 linkage) {
1239 __ASSERT_TAGE_INVARIANT(tage);
57d1b188 1240
f6c81c5e 1241 list_del(&tage->linkage);
1242 tage_free(tage);
1243 }
1244 tcd->tcd_cur_pages = 0;
57d1b188 1245 }
57d1b188 1246 }
1247}
1248
1249static void
1250trace_fini(void)
1251{
1252 int i, j;
1253
f6c81c5e 1254 trace_cleanup_on_all_cpus();
57d1b188 1255
1256 for (i = 0; i < num_possible_cpus(); i++) {
1257 for (j = 0; j < 3; j++) {
1258 if (trace_console_buffers[i][j] != NULL) {
1259 kfree(trace_console_buffers[i][j]);
1260 trace_console_buffers[i][j] = NULL;
1261 }
1262 }
1263 }
1264
124ca8a5 1265 for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
57d1b188 1266 kfree(trace_data[i]);
1267 trace_data[i] = NULL;
1268 }
1269}
1270
1271void
1272debug_fini(void)
1273{
57d1b188 1274 trace_fini();
57d1b188 1275}