modules/spl/spl-debug.c
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4  * This file was originally part of Lustre, http://www.lustre.org,
5 * but has subsequently been adapted for use in the SPL in
6 * accordance with the GPL.
7 *
8 * Copyright (C) 2004 Cluster File Systems, Inc.
9 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
10 * Produced at Lawrence Livermore National Laboratory
11 * Written by:
12 * Zach Brown <zab@clusterfs.com>
13 * Phil Schwan <phil@clusterfs.com>
14 * Brian Behlendorf <behlendorf1@llnl.gov>,
15 * Herb Wartens <wartens2@llnl.gov>,
16 * Jim Garlick <garlick@llnl.gov>
17 * UCRL-CODE-235197
18 *
19 * This is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This is distributed in the hope that it will be useful, but WITHOUT
25 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 * for more details.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
32 */
33
34 #include <linux/kmod.h>
35 #include <linux/mm.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pagemap.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/kthread.h>
41 #include <linux/hardirq.h>
42 #include <linux/interrupt.h>
43 #include <sys/sysmacros.h>
44 #include <sys/proc.h>
45 #include <sys/debug.h>
46 #include <spl-ctl.h>
47
48 #ifdef DEBUG_SUBSYSTEM
49 #undef DEBUG_SUBSYSTEM
50 #endif
51
52 #define DEBUG_SUBSYSTEM S_DEBUG
53
54 unsigned long spl_debug_subsys = ~0;
55 EXPORT_SYMBOL(spl_debug_subsys);
56 module_param(spl_debug_subsys, long, 0644);
57 MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
58
59 unsigned long spl_debug_mask = (D_EMERG | D_ERROR | D_WARNING | D_CONSOLE);
60 EXPORT_SYMBOL(spl_debug_mask);
61 module_param(spl_debug_mask, long, 0644);
62 MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
63
64 unsigned long spl_debug_printk = D_CANTMASK;
65 EXPORT_SYMBOL(spl_debug_printk);
66 module_param(spl_debug_printk, long, 0644);
67 MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
68
69 int spl_debug_mb = -1;
70 EXPORT_SYMBOL(spl_debug_mb);
71 module_param(spl_debug_mb, int, 0644);
72 MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
73
74 unsigned int spl_debug_binary = 1;
75 EXPORT_SYMBOL(spl_debug_binary);
76
77 unsigned int spl_debug_catastrophe;
78 EXPORT_SYMBOL(spl_debug_catastrophe);
79
80 unsigned int spl_debug_panic_on_bug = 1;
81 EXPORT_SYMBOL(spl_debug_panic_on_bug);
82 module_param(spl_debug_panic_on_bug, int, 0644);
83 MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");
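/*
 * The tunables above are exposed as module parameters and may be set at
 * load time, e.g. (assuming the module is named 'spl'):
 *   modprobe spl spl_debug_mask=-1 spl_debug_subsys=-1
 * or adjusted at runtime via /sys/module/spl/parameters/.
 */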
84
85 static char spl_debug_file_name[PATH_MAX];
86 char spl_debug_file_path[PATH_MAX] = "/var/dumps/spl-log";
87
88 unsigned int spl_console_ratelimit = 1;
89 EXPORT_SYMBOL(spl_console_ratelimit);
90
91 long spl_console_max_delay;
92 EXPORT_SYMBOL(spl_console_max_delay);
93
94 long spl_console_min_delay;
95 EXPORT_SYMBOL(spl_console_min_delay);
96
97 unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
98 EXPORT_SYMBOL(spl_console_backoff);
99
100 unsigned int spl_debug_stack;
101 EXPORT_SYMBOL(spl_debug_stack);
102
103 static int spl_panic_in_progress;
104
105 union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
106 char *trace_console_buffers[NR_CPUS][3]; /* one per context: irq, softirq, process */
107 struct rw_semaphore trace_sem;
108 atomic_t trace_tage_allocated = ATOMIC_INIT(0);
109
110 static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
111 static void trace_fini(void);
112
113
114 /* Memory percentage breakdown by type */
115 static unsigned int pages_factor[TCD_TYPE_MAX] = {
116 80, /* 80% pages for TCD_TYPE_PROC */
117 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
118 10 /* 10% pages for TCD_TYPE_IRQ */
119 };
120
121 const char *
122 spl_debug_subsys2str(int subsys)
123 {
124 switch (subsys) {
125 default:
126 return NULL;
127 case S_UNDEFINED:
128 return "undefined";
129 case S_ATOMIC:
130 return "atomic";
131 case S_KOBJ:
132 return "kobj";
133 case S_VNODE:
134 return "vnode";
135 case S_TIME:
136 return "time";
137 case S_RWLOCK:
138 return "rwlock";
139 case S_THREAD:
140 return "thread";
141 case S_CONDVAR:
142 return "condvar";
143 case S_MUTEX:
144 return "mutex";
145 case S_RNG:
146 return "rng";
147 case S_TASKQ:
148 return "taskq";
149 case S_KMEM:
150 return "kmem";
151 case S_DEBUG:
152 return "debug";
153 case S_GENERIC:
154 return "generic";
155 case S_PROC:
156 return "proc";
157 case S_MODULE:
158 return "module";
159 }
160 }
161
162 const char *
163 spl_debug_dbg2str(int debug)
164 {
165 switch (debug) {
166 default:
167 return NULL;
168 case D_TRACE:
169 return "trace";
170 case D_INFO:
171 return "info";
172 case D_WARNING:
173 return "warning";
174 case D_ERROR:
175 return "error";
176 case D_EMERG:
177 return "emerg";
178 case D_CONSOLE:
179 return "console";
180 case D_IOCTL:
181 return "ioctl";
182 case D_DPRINTF:
183 return "dprintf";
184 case D_OTHER:
185 return "other";
186 }
187 }
188
189 int
190 spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
191 {
192 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
193 spl_debug_dbg2str;
194 const char *token;
195 int i, bit, len = 0;
196
197 if (mask == 0) { /* "0" */
198 if (size > 0)
199 str[0] = '0';
200 len = 1;
201 } else { /* space-separated tokens */
202 for (i = 0; i < 32; i++) {
203 bit = 1 << i;
204
205 if ((mask & bit) == 0)
206 continue;
207
208 token = fn(bit);
209 if (token == NULL) /* unused bit */
210 continue;
211
212 if (len > 0) { /* separator? */
213 if (len < size)
214 str[len] = ' ';
215 len++;
216 }
217
218 while (*token != 0) {
219 if (len < size)
220 str[len] = *token;
221 token++;
222 len++;
223 }
224 }
225 }
226
227 /* terminate 'str' */
228 if (len < size)
229 str[len] = 0;
230 else
231 str[size - 1] = 0;
232
233 return len;
234 }
235
236 static int
237 spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
238 {
239 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
240 spl_debug_dbg2str;
241 const char *token;
242 int i, j, bit;
243
244 /* match against known tokens */
245 for (i = 0; i < 32; i++) {
246 bit = 1 << i;
247
248 token = fn(bit);
249 if (token == NULL) /* unused? */
250 continue;
251
252 /* strcasecmp */
253 for (j = 0; ; j++) {
254 if (j == len) { /* end of token */
255 if (token[j] == 0) {
256 *mask = bit;
257 return 0;
258 }
259 break;
260 }
261
262 if (token[j] == 0)
263 break;
264
265 if (str[j] == token[j])
266 continue;
267
268 if (str[j] < 'A' || 'Z' < str[j])
269 break;
270
271 if (str[j] - 'A' + 'a' != token[j])
272 break;
273 }
274 }
275
276 return -EINVAL; /* no match */
277 }
278
279 int
280 spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
281 {
282 char op = 0;
283 int m = 0, matched, n, t;
284
285 /* Allow a number for backwards compatibility */
286 for (n = strlen(str); n > 0; n--)
287 if (!isspace(str[n-1]))
288 break;
289 matched = n;
290
291 if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
292 *mask = m;
293 return 0;
294 }
295
296 /* <str> must be a list of debug tokens or numbers separated by
297 * whitespace and optionally an operator ('+' or '-'). If an operator
298 * appears first in <str>, '*mask' is used as the starting point
299 * (relative), otherwise 0 is used (absolute). An operator applies to
300 * all following tokens up to the next operator. */
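        /* For example: "warning error" selects exactly those bits,
         * "+info" adds the info bit to the current mask and "-trace"
         * clears the trace bit. */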
301 matched = 0;
302 while (*str != 0) {
303 while (isspace(*str)) /* skip whitespace */
304 str++;
305
306 if (*str == 0)
307 break;
308
309 if (*str == '+' || *str == '-') {
310 op = *str++;
311
312 /* op on first token == relative */
313 if (!matched)
314 m = *mask;
315
316 while (isspace(*str)) /* skip whitespace */
317 str++;
318
319 if (*str == 0) /* trailing op */
320 return -EINVAL;
321 }
322
323 /* find token length */
324 for (n = 0; str[n] != 0 && !isspace(str[n]); n++);
325
326 /* match token */
327 if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
328 return -EINVAL;
329
330 matched = 1;
331 if (op == '-')
332 m &= ~t;
333 else
334 m |= t;
335
336 str += n;
337 }
338
339 if (!matched)
340 return -EINVAL;
341
342 *mask = m;
343 return 0;
344 }
345
346 static void
347 spl_debug_dumplog_internal(dumplog_priv_t *dp)
348 {
349 void *journal_info;
350
351 journal_info = current->journal_info;
352 current->journal_info = NULL;
353
354         snprintf(spl_debug_file_name, sizeof(spl_debug_file_name) - 1,
355 "%s.%ld.%ld", spl_debug_file_path,
356 get_seconds(), (long)dp->dp_pid);
357 printk(KERN_ALERT "SPL: dumping log to %s\n", spl_debug_file_name);
358 spl_debug_dump_all_pages(dp, spl_debug_file_name);
359
360 current->journal_info = journal_info;
361 }
362
363 static int
364 spl_debug_dumplog_thread(void *arg)
365 {
366 dumplog_priv_t *dp = (dumplog_priv_t *)arg;
367
368 spl_debug_dumplog_internal(dp);
369 atomic_set(&dp->dp_done, 1);
370 wake_up(&dp->dp_waitq);
371 complete_and_exit(NULL, 0);
372
373 return 0; /* Unreachable */
374 }
375
376 /* When the DL_NOTHREAD flag is set do not use a new thread for the debug dump */
377 int
378 spl_debug_dumplog(int flags)
379 {
380 struct task_struct *tsk;
381 dumplog_priv_t dp;
382
383 init_waitqueue_head(&dp.dp_waitq);
384 dp.dp_pid = current->pid;
385 dp.dp_flags = flags;
386 atomic_set(&dp.dp_done, 0);
387
388 if (dp.dp_flags & DL_NOTHREAD) {
389 spl_debug_dumplog_internal(&dp);
390 } else {
391
392                 tsk = kthread_create(spl_debug_dumplog_thread, (void *)&dp, "spl_debug");
393                 if (IS_ERR(tsk))
394                         return -ENOMEM;
395
396 wake_up_process(tsk);
397 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
398 }
399
400 return 0;
401 }
402 EXPORT_SYMBOL(spl_debug_dumplog);
403
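/*
 * Pick the per-CPU console buffer for the current execution context:
 * index 0 is used in hardirq, 1 in softirq and 2 in process context.
 * The CPU reference taken by get_cpu() here is dropped again in
 * trace_put_console_buffer().
 */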
404 static char *
405 trace_get_console_buffer(void)
406 {
407 int cpu = get_cpu();
408 int idx;
409
410 if (in_irq()) {
411 idx = 0;
412 } else if (in_softirq()) {
413 idx = 1;
414 } else {
415 idx = 2;
416 }
417
418 return trace_console_buffers[cpu][idx];
419 }
420
421 static void
422 trace_put_console_buffer(char *buffer)
423 {
424 put_cpu();
425 }
426
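/*
 * Return the trace_cpu_data for the current CPU, selected by execution
 * context (hardirq, softirq or process).  Callers must balance this
 * with trace_put_tcd() to release the CPU reference.
 */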
427 static struct trace_cpu_data *
428 trace_get_tcd(void)
429 {
430 int cpu;
431
432 cpu = get_cpu();
433 if (in_irq())
434 return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
435 else if (in_softirq())
436 return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
437
438 return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
439 }
440
441 static void
442 trace_put_tcd (struct trace_cpu_data *tcd)
443 {
444 put_cpu();
445 }
446
447 static int
448 trace_lock_tcd(struct trace_cpu_data *tcd)
449 {
450 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
451
452 if (tcd->tcd_type == TCD_TYPE_IRQ)
453 local_irq_disable();
454 else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
455 local_bh_disable();
456
457 return 1;
458 }
459
460 static void
461 trace_unlock_tcd(struct trace_cpu_data *tcd)
462 {
463 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
464
465 if (tcd->tcd_type == TCD_TYPE_IRQ)
466 local_irq_enable();
467 else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
468 local_bh_enable();
469 }
470
471 static void
472 trace_set_debug_header(struct spl_debug_header *header, int subsys,
473 int mask, const int line, unsigned long stack)
474 {
475 struct timeval tv;
476
477 do_gettimeofday(&tv);
478
479 header->ph_subsys = subsys;
480 header->ph_mask = mask;
481 header->ph_cpu_id = smp_processor_id();
482 header->ph_sec = (__u32)tv.tv_sec;
483 header->ph_usec = tv.tv_usec;
484 header->ph_stack = stack;
485 header->ph_pid = current->pid;
486 header->ph_line_num = line;
487
488 return;
489 }
490
491 static void
492 trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
493 int len, const char *file, const char *fn)
494 {
495 char *prefix = "SPL", *ptype = NULL;
496
497 if ((mask & D_EMERG) != 0) {
498 prefix = "SPLError";
499 ptype = KERN_EMERG;
500 } else if ((mask & D_ERROR) != 0) {
501 prefix = "SPLError";
502 ptype = KERN_ERR;
503 } else if ((mask & D_WARNING) != 0) {
504 prefix = "SPL";
505 ptype = KERN_WARNING;
506 } else if ((mask & (D_CONSOLE | spl_debug_printk)) != 0) {
507 prefix = "SPL";
508 ptype = KERN_INFO;
509 }
510
511 if ((mask & D_CONSOLE) != 0) {
512 printk("%s%s: %.*s", ptype, prefix, len, buf);
513 } else {
514 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
515 hdr->ph_pid, hdr->ph_stack, file,
516 hdr->ph_line_num, fn, len, buf);
517 }
518
519 return;
520 }
521
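/* Upper bound on the debug buffer size: 80% of physical memory in MB,
 * but never less than 512 MB. */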
522 static int
523 trace_max_debug_mb(void)
524 {
525 return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
526 }
527
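/*
 * Invoke fn(arg) once on every online CPU by temporarily restricting
 * the current task's affinity to that CPU; the original cpumask is
 * restored after each call.
 */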
528 static void
529 trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
530 {
531 cpumask_t mask, cpus_allowed = current->cpus_allowed;
532 int cpu;
533
534 for_each_online_cpu(cpu) {
535 cpus_clear(mask);
536 cpu_set(cpu, mask);
537 set_cpus_allowed(current, mask);
538
539 fn(arg);
540
541 set_cpus_allowed(current, cpus_allowed);
542 }
543 }
544
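/*
 * A 'tage' (trace page) couples a freshly allocated page used for
 * message data with the list linkage needed to queue it per CPU; the
 * global trace_tage_allocated counter tracks how many exist.
 */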
545 static struct trace_page *
546 tage_alloc(int gfp)
547 {
548 struct page *page;
549 struct trace_page *tage;
550
551 page = alloc_pages(gfp | __GFP_NOWARN, 0);
552 if (page == NULL)
553 return NULL;
554
555 tage = kmalloc(sizeof(*tage), gfp);
556 if (tage == NULL) {
557 __free_pages(page, 0);
558 return NULL;
559 }
560
561 tage->page = page;
562 atomic_inc(&trace_tage_allocated);
563
564 return tage;
565 }
566
567 static void
568 tage_free(struct trace_page *tage)
569 {
570 __ASSERT(tage != NULL);
571 __ASSERT(tage->page != NULL);
572
573 __free_pages(tage->page, 0);
574 kfree(tage);
575 atomic_dec(&trace_tage_allocated);
576 }
577
578 static struct trace_page *
579 tage_from_list(struct list_head *list)
580 {
581 return list_entry(list, struct trace_page, linkage);
582 }
583
584 static void
585 tage_to_tail(struct trace_page *tage, struct list_head *queue)
586 {
587 __ASSERT(tage != NULL);
588 __ASSERT(queue != NULL);
589
590 list_move_tail(&tage->linkage, queue);
591 }
592
593 /* try to return a page that has 'len' bytes left at the end */
594 static struct trace_page *
595 trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
596 {
597 struct trace_page *tage;
598
599 if (tcd->tcd_cur_pages > 0) {
600 __ASSERT(!list_empty(&tcd->tcd_pages));
601 tage = tage_from_list(tcd->tcd_pages.prev);
602 if (tage->used + len <= PAGE_SIZE)
603 return tage;
604 }
605
606 if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
607 if (tcd->tcd_cur_stock_pages > 0) {
608 tage = tage_from_list(tcd->tcd_stock_pages.prev);
609 tcd->tcd_cur_stock_pages--;
610 list_del_init(&tage->linkage);
611 } else {
612 tage = tage_alloc(GFP_ATOMIC);
613 if (tage == NULL) {
614 printk(KERN_WARNING
615 "failure to allocate a tage (%ld)\n",
616 tcd->tcd_cur_pages);
617 return NULL;
618 }
619 }
620
621 tage->used = 0;
622 tage->cpu = smp_processor_id();
623 tage->type = tcd->tcd_type;
624 list_add_tail(&tage->linkage, &tcd->tcd_pages);
625 tcd->tcd_cur_pages++;
626
627 return tage;
628 }
629
630 return NULL;
631 }
632
633 /* return a page that has 'len' bytes left at the end */
634 static struct trace_page *
635 trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
636 {
637 struct trace_page *tage;
638
639 __ASSERT(len <= PAGE_SIZE);
640
641 tage = trace_get_tage_try(tcd, len);
642 if (tage)
643 return tage;
644
645 if (tcd->tcd_cur_pages > 0) {
646 tage = tage_from_list(tcd->tcd_pages.next);
647 tage->used = 0;
648 tage_to_tail(tage, &tcd->tcd_pages);
649 }
650
651 return tage;
652 }
653
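/*
 * Core message formatting routine.  The message is written into the
 * per-CPU trace buffer and, when (mask & spl_debug_printk) is non-zero,
 * echoed to the console as well, subject to the rate limiting state in
 * 'cdls' when one is supplied.
 */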
654 int
655 spl_debug_vmsg(spl_debug_limit_state_t *cdls, int subsys, int mask,
656 const char *file, const char *fn, const int line,
657 const char *format1, va_list args, const char *format2, ...)
658 {
659 struct trace_cpu_data *tcd = NULL;
660 struct spl_debug_header header;
661 struct trace_page *tage;
662 /* string_buf is used only if tcd != NULL, and is always set then */
663 char *string_buf = NULL;
664 char *debug_buf;
665 int known_size;
666 int needed = 85; /* average message length */
667 int max_nob;
668 va_list ap;
669 int i;
670 int remain;
671
672 if (strchr(file, '/'))
673 file = strrchr(file, '/') + 1;
674
675 trace_set_debug_header(&header, subsys, mask, line, CDEBUG_STACK());
676
677 tcd = trace_get_tcd();
678 if (tcd == NULL)
679 goto console;
680
681 if (tcd->tcd_shutting_down) {
682 trace_put_tcd(tcd);
683 tcd = NULL;
684 goto console;
685 }
686
687 known_size = strlen(file) + 1;
688 if (fn)
689 known_size += strlen(fn) + 1;
690
691 if (spl_debug_binary)
692 known_size += sizeof(header);
693
694 /* '2' used because vsnprintf returns real size required for output
695 * _without_ terminating NULL. */
696 for (i = 0; i < 2; i++) {
697 tage = trace_get_tage(tcd, needed + known_size + 1);
698 if (tage == NULL) {
699 if (needed + known_size > PAGE_SIZE)
700 mask |= D_ERROR;
701
702 trace_put_tcd(tcd);
703 tcd = NULL;
704 goto console;
705 }
706
707 string_buf = (char *)page_address(tage->page) +
708 tage->used + known_size;
709
710 max_nob = PAGE_SIZE - tage->used - known_size;
711 if (max_nob <= 0) {
712 printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
713 mask |= D_ERROR;
714 trace_put_tcd(tcd);
715 tcd = NULL;
716 goto console;
717 }
718
719 needed = 0;
720 if (format1) {
721 va_copy(ap, args);
722 needed = vsnprintf(string_buf, max_nob, format1, ap);
723 va_end(ap);
724 }
725
726 if (format2) {
727 remain = max_nob - needed;
728 if (remain < 0)
729 remain = 0;
730
731 va_start(ap, format2);
732 needed += vsnprintf(string_buf+needed, remain, format2, ap);
733 va_end(ap);
734 }
735
736 if (needed < max_nob)
737 break;
738 }
739
740 if (unlikely(*(string_buf + needed - 1) != '\n'))
741 printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
742 file, line, fn);
743
744 header.ph_len = known_size + needed;
745 debug_buf = (char *)page_address(tage->page) + tage->used;
746
747 if (spl_debug_binary) {
748 memcpy(debug_buf, &header, sizeof(header));
749 tage->used += sizeof(header);
750 debug_buf += sizeof(header);
751 }
752
753 strcpy(debug_buf, file);
754 tage->used += strlen(file) + 1;
755 debug_buf += strlen(file) + 1;
756
757 if (fn) {
758 strcpy(debug_buf, fn);
759 tage->used += strlen(fn) + 1;
760 debug_buf += strlen(fn) + 1;
761 }
762
763 __ASSERT(debug_buf == string_buf);
764
765 tage->used += needed;
766 __ASSERT (tage->used <= PAGE_SIZE);
767
768 console:
769 if ((mask & spl_debug_printk) == 0) {
770 /* no console output requested */
771 if (tcd != NULL)
772 trace_put_tcd(tcd);
773 return 1;
774 }
775
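        /* Console rate limiting: once a message is printed the delay
         * before the next one is allowed grows by spl_console_backoff
         * (clamped between spl_console_min_delay and
         * spl_console_max_delay); messages arriving sooner are counted
         * and later summarized as skipped. */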
776 if (cdls != NULL) {
777 if (spl_console_ratelimit && cdls->cdls_next != 0 &&
778 !time_before(cdls->cdls_next, jiffies)) {
779 /* skipping a console message */
780 cdls->cdls_count++;
781 if (tcd != NULL)
782 trace_put_tcd(tcd);
783 return 1;
784 }
785
786 if (time_before(cdls->cdls_next + spl_console_max_delay +
787 (10 * HZ), jiffies)) {
788 /* last timeout was a long time ago */
789 cdls->cdls_delay /= spl_console_backoff * 4;
790 } else {
791 cdls->cdls_delay *= spl_console_backoff;
792
793 if (cdls->cdls_delay < spl_console_min_delay)
794 cdls->cdls_delay = spl_console_min_delay;
795 else if (cdls->cdls_delay > spl_console_max_delay)
796 cdls->cdls_delay = spl_console_max_delay;
797 }
798
799 /* ensure cdls_next is never zero after it's been seen */
800 cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
801 }
802
803 if (tcd != NULL) {
804 trace_print_to_console(&header, mask, string_buf, needed, file, fn);
805 trace_put_tcd(tcd);
806 } else {
807 string_buf = trace_get_console_buffer();
808
809 needed = 0;
810 if (format1 != NULL) {
811 va_copy(ap, args);
812 needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
813 va_end(ap);
814 }
815 if (format2 != NULL) {
816 remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
817 if (remain > 0) {
818 va_start(ap, format2);
819 needed += vsnprintf(string_buf+needed, remain, format2, ap);
820 va_end(ap);
821 }
822 }
823 trace_print_to_console(&header, mask,
824 string_buf, needed, file, fn);
825
826 trace_put_console_buffer(string_buf);
827 }
828
829 if (cdls != NULL && cdls->cdls_count != 0) {
830 string_buf = trace_get_console_buffer();
831
832 needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
833 "Skipped %d previous similar message%s\n",
834 cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
835
836 trace_print_to_console(&header, mask,
837 string_buf, needed, file, fn);
838
839 trace_put_console_buffer(string_buf);
840 cdls->cdls_count = 0;
841 }
842
843 return 0;
844 }
845 EXPORT_SYMBOL(spl_debug_vmsg);
846
847 /* Do the collect_pages job on a single CPU: assumes that all other
848 * CPUs have been stopped during a panic. If this isn't true for
849 * some arch, this will have to be implemented separately in each arch.
850 */
851 static void
852 collect_pages_from_single_cpu(struct page_collection *pc)
853 {
854 struct trace_cpu_data *tcd;
855 int i, j;
856
857 tcd_for_each(tcd, i, j) {
858 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
859 tcd->tcd_cur_pages = 0;
860 }
861 }
862
863 static void
864 collect_pages_on_cpu(void *info)
865 {
866 struct trace_cpu_data *tcd;
867 struct page_collection *pc = info;
868 int i;
869
870 spin_lock(&pc->pc_lock);
871 tcd_for_each_type_lock(tcd, i) {
872 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
873 tcd->tcd_cur_pages = 0;
874 }
875 spin_unlock(&pc->pc_lock);
876 }
877
878 static void
879 collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
880 {
881 INIT_LIST_HEAD(&pc->pc_pages);
882
883 if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
884 collect_pages_from_single_cpu(pc);
885 else
886 trace_call_on_all_cpus(collect_pages_on_cpu, pc);
887 }
888
889 static void
890 put_pages_back_on_cpu(void *info)
891 {
892 struct page_collection *pc = info;
893 struct trace_cpu_data *tcd;
894 struct list_head *cur_head;
895 struct trace_page *tage;
896 struct trace_page *tmp;
897 int i;
898
899 spin_lock(&pc->pc_lock);
900 tcd_for_each_type_lock(tcd, i) {
901 cur_head = tcd->tcd_pages.next;
902
903 list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
904
905 __ASSERT_TAGE_INVARIANT(tage);
906
907 if (tage->cpu != smp_processor_id() || tage->type != i)
908 continue;
909
910 tage_to_tail(tage, cur_head);
911 tcd->tcd_cur_pages++;
912 }
913 }
914 spin_unlock(&pc->pc_lock);
915 }
916
917 static void
918 put_pages_back(struct page_collection *pc)
919 {
920 if (!spl_panic_in_progress)
921 trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
922 }
923
924 static struct file *
925 trace_filp_open (const char *name, int flags, int mode, int *err)
926 {
927 struct file *filp = NULL;
928 int rc;
929
930 filp = filp_open(name, flags, mode);
931 if (IS_ERR(filp)) {
932 rc = PTR_ERR(filp);
933 printk(KERN_ERR "SPL: Can't open %s file: %d\n", name, rc);
934 if (err)
935 *err = rc;
936 filp = NULL;
937 }
938 return filp;
939 }
940
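/* Thin wrappers around the VFS file operations used to dump trace pages
 * to disk; the caller wraps writes in set_fs(get_ds()) so kernel-space
 * buffers are accepted. */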
941 #define trace_filp_write(fp, b, s, p) (fp)->f_op->write((fp), (b), (s), p)
942 #define trace_filp_fsync(fp) (fp)->f_op->fsync((fp),(fp)->f_dentry,1)
943 #define trace_filp_close(f) filp_close(f, NULL)
944 #define trace_filp_poff(f) (&(f)->f_pos)
945
946 static int
947 spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
948 {
949 struct page_collection pc;
950 struct file *filp;
951 struct trace_page *tage;
952 struct trace_page *tmp;
953 mm_segment_t oldfs;
954 int rc = 0;
955
956 down_write(&trace_sem);
957
958 filp = trace_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
959 0600, &rc);
960 if (filp == NULL) {
961 if (rc != -EEXIST)
962 printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
963 filename, rc);
964 goto out;
965 }
966
967 spin_lock_init(&pc.pc_lock);
968 collect_pages(dp, &pc);
969 if (list_empty(&pc.pc_pages)) {
970 rc = 0;
971 goto close;
972 }
973
974 oldfs = get_fs();
975 set_fs(get_ds());
976
977 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
978 __ASSERT_TAGE_INVARIANT(tage);
979
980 rc = trace_filp_write(filp, page_address(tage->page),
981 tage->used, trace_filp_poff(filp));
982 if (rc != (int)tage->used) {
983 printk(KERN_WARNING "SPL: Wanted to write %u "
984 "but wrote %d\n", tage->used, rc);
985 put_pages_back(&pc);
986 __ASSERT(list_empty(&pc.pc_pages));
987 break;
988 }
989 list_del(&tage->linkage);
990 tage_free(tage);
991 }
992
993 set_fs(oldfs);
994
995 rc = trace_filp_fsync(filp);
996 if (rc)
997 printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
998 close:
999 trace_filp_close(filp);
1000 out:
1001 up_write(&trace_sem);
1002
1003 return rc;
1004 }
1005
1006 static void
1007 spl_debug_flush_pages(void)
1008 {
1009 dumplog_priv_t dp;
1010 struct page_collection pc;
1011 struct trace_page *tage;
1012 struct trace_page *tmp;
1013
1014 spin_lock_init(&pc.pc_lock);
1015 init_waitqueue_head(&dp.dp_waitq);
1016 dp.dp_pid = current->pid;
1017 dp.dp_flags = 0;
1018 atomic_set(&dp.dp_done, 0);
1019
1020 collect_pages(&dp, &pc);
1021 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1022 __ASSERT_TAGE_INVARIANT(tage);
1023 list_del(&tage->linkage);
1024 tage_free(tage);
1025 }
1026 }
1027
1028 unsigned long
1029 spl_debug_set_mask(unsigned long mask) {
1030 spl_debug_mask = mask;
1031 return 0;
1032 }
1033 EXPORT_SYMBOL(spl_debug_set_mask);
1034
1035 unsigned long
1036 spl_debug_get_mask(void) {
1037 return spl_debug_mask;
1038 }
1039 EXPORT_SYMBOL(spl_debug_get_mask);
1040
1041 unsigned long
1042 spl_debug_set_subsys(unsigned long subsys) {
1043 spl_debug_subsys = subsys;
1044 return 0;
1045 }
1046 EXPORT_SYMBOL(spl_debug_set_subsys);
1047
1048 unsigned long
1049 spl_debug_get_subsys(void) {
1050 return spl_debug_subsys;
1051 }
1052 EXPORT_SYMBOL(spl_debug_get_subsys);
1053
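/*
 * Resize the debug buffers.  'mb' is the total size in MB across all
 * CPUs; it is divided evenly between CPUs and then among the TCD types
 * according to their pages_factor percentages.
 */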
1054 int
1055 spl_debug_set_mb(int mb)
1056 {
1057 int i, j, pages;
1058 int limit = trace_max_debug_mb();
1059 struct trace_cpu_data *tcd;
1060
1061 if (mb < num_possible_cpus()) {
1062 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1063 "%dMB - lower limit is %d\n", mb, num_possible_cpus());
1064 return -EINVAL;
1065 }
1066
1067 if (mb > limit) {
1068 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1069 "%dMB - upper limit is %d\n", mb, limit);
1070 return -EINVAL;
1071 }
1072
1073 mb /= num_possible_cpus();
1074 pages = mb << (20 - PAGE_SHIFT);
1075
1076 down_write(&trace_sem);
1077
1078 tcd_for_each(tcd, i, j)
1079 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
1080
1081 up_write(&trace_sem);
1082
1083 return 0;
1084 }
1085 EXPORT_SYMBOL(spl_debug_set_mb);
1086
1087 int
1088 spl_debug_get_mb(void)
1089 {
1090 int i, j;
1091 struct trace_cpu_data *tcd;
1092 int total_pages = 0;
1093
1094 down_read(&trace_sem);
1095
1096 tcd_for_each(tcd, i, j)
1097 total_pages += tcd->tcd_max_pages;
1098
1099 up_read(&trace_sem);
1100
1101 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1102 }
1103 EXPORT_SYMBOL(spl_debug_get_mb);
1104
1105 void spl_debug_dumpstack(struct task_struct *tsk)
1106 {
1107 extern void show_task(struct task_struct *);
1108
1109 if (tsk == NULL)
1110 tsk = current;
1111
1112 printk(KERN_ERR "SPL: Showing stack for process %d\n", tsk->pid);
1113 dump_stack();
1114 }
1115 EXPORT_SYMBOL(spl_debug_dumpstack);
1116
1117 void spl_debug_bug(char *file, const char *func, const int line, int flags)
1118 {
1119 spl_debug_catastrophe = 1;
1120 spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SBUG\n");
1121
1122 if (in_interrupt()) {
1123 panic("SBUG in interrupt.\n");
1124 /* not reached */
1125 }
1126
1127         /* Ensure all debug pages are dumped by the current cpu */
1128 if (spl_debug_panic_on_bug)
1129 spl_panic_in_progress = 1;
1130
1131 spl_debug_dumpstack(NULL);
1132 spl_debug_dumplog(flags);
1133
1134 if (spl_debug_panic_on_bug)
1135 panic("SBUG");
1136
1137 set_task_state(current, TASK_UNINTERRUPTIBLE);
1138 while (1)
1139 schedule();
1140 }
1141 EXPORT_SYMBOL(spl_debug_bug);
1142
1143 int
1144 spl_debug_clear_buffer(void)
1145 {
1146 spl_debug_flush_pages();
1147 return 0;
1148 }
1149 EXPORT_SYMBOL(spl_debug_clear_buffer);
1150
1151 int
1152 spl_debug_mark_buffer(char *text)
1153 {
1154 CDEBUG(D_WARNING, "*************************************\n");
1155 CDEBUG(D_WARNING, "DEBUG MARKER: %s\n", text);
1156 CDEBUG(D_WARNING, "*************************************\n");
1157
1158 return 0;
1159 }
1160 EXPORT_SYMBOL(spl_debug_mark_buffer);
1161
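/*
 * Allocate the per-type trace data arrays and the per-CPU console
 * buffers.  'max_pages' is the per-CPU page budget, split among the
 * TCD types according to pages_factor.
 */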
1162 static int
1163 trace_init(int max_pages)
1164 {
1165 struct trace_cpu_data *tcd;
1166 int i, j;
1167
1168 init_rwsem(&trace_sem);
1169
1170 /* initialize trace_data */
1171 memset(trace_data, 0, sizeof(trace_data));
1172 for (i = 0; i < TCD_TYPE_MAX; i++) {
1173 trace_data[i] = kmalloc(sizeof(union trace_data_union) *
1174 NR_CPUS, GFP_KERNEL);
1175 if (trace_data[i] == NULL)
1176 goto out;
1177 }
1178
1179 tcd_for_each(tcd, i, j) {
1180 tcd->tcd_pages_factor = pages_factor[i];
1181 tcd->tcd_type = i;
1182 tcd->tcd_cpu = j;
1183 INIT_LIST_HEAD(&tcd->tcd_pages);
1184 INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1185 tcd->tcd_cur_pages = 0;
1186 tcd->tcd_cur_stock_pages = 0;
1187 tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
1188 tcd->tcd_shutting_down = 0;
1189 }
1190
1191 for (i = 0; i < num_possible_cpus(); i++) {
1192 for (j = 0; j < 3; j++) {
1193 trace_console_buffers[i][j] =
1194 kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
1195 GFP_KERNEL);
1196
1197 if (trace_console_buffers[i][j] == NULL)
1198 goto out;
1199 }
1200 }
1201
1202 return 0;
1203 out:
1204 trace_fini();
1205 printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
1206 return -ENOMEM;
1207 }
1208
1209 int
1210 debug_init(void)
1211 {
1212 int rc, max = spl_debug_mb;
1213
1214 spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
1215 spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
1216
1217         /* If spl_debug_mb is set to an invalid value or left uninitialized
1218          * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES pages */
1219 if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
1220 max >= 512 || max < 0) {
1221 max = TCD_MAX_PAGES;
1222 } else {
1223 max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
1224 }
1225
1226 rc = trace_init(max);
1227 if (rc)
1228 return rc;
1229
1230 return rc;
1231 }
1232
1233 static void
1234 trace_cleanup_on_cpu(void *info)
1235 {
1236 struct trace_cpu_data *tcd;
1237 struct trace_page *tage;
1238 struct trace_page *tmp;
1239 int i;
1240
1241 tcd_for_each_type_lock(tcd, i) {
1242 tcd->tcd_shutting_down = 1;
1243
1244 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
1245 __ASSERT_TAGE_INVARIANT(tage);
1246
1247 list_del(&tage->linkage);
1248 tage_free(tage);
1249 }
1250 tcd->tcd_cur_pages = 0;
1251 }
1252 }
1253
1254 static void
1255 trace_fini(void)
1256 {
1257 int i, j;
1258
1259 trace_call_on_all_cpus(trace_cleanup_on_cpu, NULL);
1260
1261 for (i = 0; i < num_possible_cpus(); i++) {
1262 for (j = 0; j < 3; j++) {
1263 if (trace_console_buffers[i][j] != NULL) {
1264 kfree(trace_console_buffers[i][j]);
1265 trace_console_buffers[i][j] = NULL;
1266 }
1267 }
1268 }
1269
1270         for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
1271 kfree(trace_data[i]);
1272 trace_data[i] = NULL;
1273 }
1274 }
1275
1276 void
1277 debug_fini(void)
1278 {
1279 trace_fini();
1280 }