/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
21 #define pr_fmt(fmt) "pstore: " fmt
23 #include <linux/atomic.h>
24 #include <linux/types.h>
25 #include <linux/errno.h>
26 #include <linux/init.h>
27 #include <linux/kmsg_dump.h>
28 #include <linux/console.h>
29 #include <linux/module.h>
30 #include <linux/pstore.h>
31 #ifdef CONFIG_PSTORE_ZLIB_COMPRESS
32 #include <linux/zlib.h>
34 #ifdef CONFIG_PSTORE_LZO_COMPRESS
35 #include <linux/lzo.h>
37 #ifdef CONFIG_PSTORE_LZ4_COMPRESS
38 #include <linux/lz4.h>
40 #include <linux/string.h>
41 #include <linux/timer.h>
42 #include <linux/slab.h>
43 #include <linux/uaccess.h>
44 #include <linux/hardirq.h>
45 #include <linux/jiffies.h>
46 #include <linux/workqueue.h>
51 * We defer making "oops" entries appear in pstore - see
52 * whether the system is actually still running well enough
53 * to let someone see the entry
55 static int pstore_update_ms
= -1;
56 module_param_named(update_ms
, pstore_update_ms
, int, 0600);
57 MODULE_PARM_DESC(update_ms
, "milliseconds before pstore updates its content "
58 "(default is -1, which means runtime updates are disabled; "
59 "enabling this option is not safe, it may lead to further "
60 "corruption on Oopses)");
62 static int pstore_new_entry
;
64 static void pstore_timefunc(unsigned long);
65 static DEFINE_TIMER(pstore_timer
, pstore_timefunc
, 0, 0);
67 static void pstore_dowork(struct work_struct
*);
68 static DECLARE_WORK(pstore_work
, pstore_dowork
);
71 * pstore_lock just protects "psinfo" during
72 * calls to pstore_register()
74 static DEFINE_SPINLOCK(pstore_lock
);
75 struct pstore_info
*psinfo
;
79 /* Compression parameters */
80 #ifdef CONFIG_PSTORE_ZLIB_COMPRESS
82 #define WINDOW_BITS 12
84 static struct z_stream_s stream
;
86 static unsigned char *workspace
;
89 struct pstore_zbackend
{
90 int (*compress
)(const void *in
, void *out
, size_t inlen
, size_t outlen
);
91 int (*decompress
)(void *in
, void *out
, size_t inlen
, size_t outlen
);
92 void (*allocate
)(void);
98 static char *big_oops_buf
;
99 static size_t big_oops_buf_sz
;
101 /* How much of the console log to snapshot */
102 static unsigned long kmsg_bytes
= 10240;
104 void pstore_set_kmsg_bytes(int bytes
)
109 /* Tag each group of saved records with a sequence number */
110 static int oopscount
;
112 static const char *get_reason_str(enum kmsg_dump_reason reason
)
115 case KMSG_DUMP_PANIC
:
119 case KMSG_DUMP_EMERG
:
121 case KMSG_DUMP_RESTART
:
125 case KMSG_DUMP_POWEROFF
:
132 bool pstore_cannot_block_path(enum kmsg_dump_reason reason
)
135 * In case of NMI path, pstore shouldn't be blocked
136 * regardless of reason.
142 /* In panic case, other cpus are stopped by smp_send_stop(). */
143 case KMSG_DUMP_PANIC
:
144 /* Emergency restart shouldn't be blocked by spin lock. */
145 case KMSG_DUMP_EMERG
:
151 EXPORT_SYMBOL_GPL(pstore_cannot_block_path
);
153 #ifdef CONFIG_PSTORE_ZLIB_COMPRESS
154 /* Derived from logfs_compress() */
155 static int compress_zlib(const void *in
, void *out
, size_t inlen
, size_t outlen
)
160 err
= zlib_deflateInit2(&stream
, COMPR_LEVEL
, Z_DEFLATED
, WINDOW_BITS
,
161 MEM_LEVEL
, Z_DEFAULT_STRATEGY
);
166 stream
.avail_in
= inlen
;
168 stream
.next_out
= out
;
169 stream
.avail_out
= outlen
;
170 stream
.total_out
= 0;
172 err
= zlib_deflate(&stream
, Z_FINISH
);
173 if (err
!= Z_STREAM_END
)
176 err
= zlib_deflateEnd(&stream
);
180 if (stream
.total_out
>= stream
.total_in
)
183 ret
= stream
.total_out
;
188 /* Derived from logfs_uncompress */
189 static int decompress_zlib(void *in
, void *out
, size_t inlen
, size_t outlen
)
194 err
= zlib_inflateInit2(&stream
, WINDOW_BITS
);
199 stream
.avail_in
= inlen
;
201 stream
.next_out
= out
;
202 stream
.avail_out
= outlen
;
203 stream
.total_out
= 0;
205 err
= zlib_inflate(&stream
, Z_FINISH
);
206 if (err
!= Z_STREAM_END
)
209 err
= zlib_inflateEnd(&stream
);
213 ret
= stream
.total_out
;
218 static void allocate_zlib(void)
223 switch (psinfo
->bufsize
) {
224 /* buffer range for efivars */
234 /* buffer range for nvram, erst */
243 big_oops_buf_sz
= (psinfo
->bufsize
* 100) / cmpr
;
244 big_oops_buf
= kmalloc(big_oops_buf_sz
, GFP_KERNEL
);
246 size
= max(zlib_deflate_workspacesize(WINDOW_BITS
, MEM_LEVEL
),
247 zlib_inflate_workspacesize());
248 stream
.workspace
= kmalloc(size
, GFP_KERNEL
);
249 if (!stream
.workspace
) {
250 pr_err("No memory for compression workspace; skipping compression\n");
255 pr_err("No memory for uncompressed data; skipping compression\n");
256 stream
.workspace
= NULL
;
261 static void free_zlib(void)
263 kfree(stream
.workspace
);
264 stream
.workspace
= NULL
;
270 static struct pstore_zbackend backend_zlib
= {
271 .compress
= compress_zlib
,
272 .decompress
= decompress_zlib
,
273 .allocate
= allocate_zlib
,
279 #ifdef CONFIG_PSTORE_LZO_COMPRESS
280 static int compress_lzo(const void *in
, void *out
, size_t inlen
, size_t outlen
)
284 ret
= lzo1x_1_compress(in
, inlen
, out
, &outlen
, workspace
);
285 if (ret
!= LZO_E_OK
) {
286 pr_err("lzo_compress error, ret = %d!\n", ret
);
293 static int decompress_lzo(void *in
, void *out
, size_t inlen
, size_t outlen
)
297 ret
= lzo1x_decompress_safe(in
, inlen
, out
, &outlen
);
298 if (ret
!= LZO_E_OK
) {
299 pr_err("lzo_decompress error, ret = %d!\n", ret
);
306 static void allocate_lzo(void)
308 big_oops_buf_sz
= lzo1x_worst_compress(psinfo
->bufsize
);
309 big_oops_buf
= kmalloc(big_oops_buf_sz
, GFP_KERNEL
);
311 workspace
= kmalloc(LZO1X_MEM_COMPRESS
, GFP_KERNEL
);
313 pr_err("No memory for compression workspace; skipping compression\n");
318 pr_err("No memory for uncompressed data; skipping compression\n");
323 static void free_lzo(void)
331 static struct pstore_zbackend backend_lzo
= {
332 .compress
= compress_lzo
,
333 .decompress
= decompress_lzo
,
334 .allocate
= allocate_lzo
,
340 #ifdef CONFIG_PSTORE_LZ4_COMPRESS
341 static int compress_lz4(const void *in
, void *out
, size_t inlen
, size_t outlen
)
345 ret
= lz4_compress(in
, inlen
, out
, &outlen
, workspace
);
347 pr_err("lz4_compress error, ret = %d!\n", ret
);
354 static int decompress_lz4(void *in
, void *out
, size_t inlen
, size_t outlen
)
358 ret
= lz4_decompress_unknownoutputsize(in
, inlen
, out
, &outlen
);
360 pr_err("lz4_decompress error, ret = %d!\n", ret
);
367 static void allocate_lz4(void)
369 big_oops_buf_sz
= lz4_compressbound(psinfo
->bufsize
);
370 big_oops_buf
= kmalloc(big_oops_buf_sz
, GFP_KERNEL
);
372 workspace
= kmalloc(LZ4_MEM_COMPRESS
, GFP_KERNEL
);
374 pr_err("No memory for compression workspace; skipping compression\n");
379 pr_err("No memory for uncompressed data; skipping compression\n");
384 static void free_lz4(void)
392 static struct pstore_zbackend backend_lz4
= {
393 .compress
= compress_lz4
,
394 .decompress
= decompress_lz4
,
395 .allocate
= allocate_lz4
,
401 static struct pstore_zbackend
*zbackend
=
402 #if defined(CONFIG_PSTORE_ZLIB_COMPRESS)
404 #elif defined(CONFIG_PSTORE_LZO_COMPRESS)
406 #elif defined(CONFIG_PSTORE_LZ4_COMPRESS)
412 static int pstore_compress(const void *in
, void *out
,
413 size_t inlen
, size_t outlen
)
416 return zbackend
->compress(in
, out
, inlen
, outlen
);
421 static int pstore_decompress(void *in
, void *out
, size_t inlen
, size_t outlen
)
424 return zbackend
->decompress(in
, out
, inlen
, outlen
);
429 static void allocate_buf_for_compression(void)
432 pr_info("using %s compression\n", zbackend
->name
);
433 zbackend
->allocate();
435 pr_err("allocate compression buffer error!\n");
439 static void free_buf_for_compression(void)
444 pr_err("free compression buffer error!\n");
448 * Called when compression fails, since the printk buffer
449 * would be fetched for compression calling it again when
450 * compression fails would have moved the iterator of
451 * printk buffer which results in fetching old contents.
452 * Copy the recent messages from big_oops_buf to psinfo->buf
454 static size_t copy_kmsg_to_buffer(int hsize
, size_t len
)
459 total_len
= hsize
+ len
;
461 if (total_len
> psinfo
->bufsize
) {
462 diff
= total_len
- psinfo
->bufsize
+ hsize
;
463 memcpy(psinfo
->buf
, big_oops_buf
, hsize
);
464 memcpy(psinfo
->buf
+ hsize
, big_oops_buf
+ diff
,
465 psinfo
->bufsize
- hsize
);
466 total_len
= psinfo
->bufsize
;
468 memcpy(psinfo
->buf
, big_oops_buf
, total_len
);
474 * callback from kmsg_dump. (s2,l2) has the most recently
475 * written bytes, older bytes are in (s1,l1). Save as much
476 * as we can from the end of the buffer.
478 static void pstore_dump(struct kmsg_dumper
*dumper
,
479 enum kmsg_dump_reason reason
)
481 unsigned long total
= 0;
484 unsigned int part
= 1;
485 unsigned long flags
= 0;
489 why
= get_reason_str(reason
);
491 if (pstore_cannot_block_path(reason
)) {
492 is_locked
= spin_trylock_irqsave(&psinfo
->buf_lock
, flags
);
494 pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
495 , in_nmi() ? "NMI" : why
);
499 spin_lock_irqsave(&psinfo
->buf_lock
, flags
);
503 while (total
< kmsg_bytes
) {
509 bool compressed
= false;
512 if (big_oops_buf
&& is_locked
) {
514 size
= big_oops_buf_sz
;
517 size
= psinfo
->bufsize
;
520 hsize
= sprintf(dst
, "%s#%d Part%u\n", why
, oopscount
, part
);
523 if (!kmsg_dump_get_buffer(dumper
, true, dst
+ hsize
,
527 if (big_oops_buf
&& is_locked
) {
528 zipped_len
= pstore_compress(dst
, psinfo
->buf
,
529 hsize
+ len
, psinfo
->bufsize
);
531 if (zipped_len
> 0) {
533 total_len
= zipped_len
;
535 total_len
= copy_kmsg_to_buffer(hsize
, len
);
538 total_len
= hsize
+ len
;
541 ret
= psinfo
->write(PSTORE_TYPE_DMESG
, reason
, &id
, part
,
542 oopscount
, compressed
, total_len
, psinfo
);
543 if (ret
== 0 && reason
== KMSG_DUMP_OOPS
&& pstore_is_mounted())
544 pstore_new_entry
= 1;
550 spin_unlock_irqrestore(&psinfo
->buf_lock
, flags
);
553 static struct kmsg_dumper pstore_dumper
= {
558 * Register with kmsg_dump to save last part of console log on panic.
560 static void pstore_register_kmsg(void)
562 kmsg_dump_register(&pstore_dumper
);
565 static void pstore_unregister_kmsg(void)
567 kmsg_dump_unregister(&pstore_dumper
);
570 #ifdef CONFIG_PSTORE_CONSOLE
571 static void pstore_console_write(struct console
*con
, const char *s
, unsigned c
)
573 const char *e
= s
+ c
;
579 if (c
> psinfo
->bufsize
)
582 if (oops_in_progress
) {
583 if (!spin_trylock_irqsave(&psinfo
->buf_lock
, flags
))
586 spin_lock_irqsave(&psinfo
->buf_lock
, flags
);
588 psinfo
->write_buf(PSTORE_TYPE_CONSOLE
, 0, &id
, 0,
590 spin_unlock_irqrestore(&psinfo
->buf_lock
, flags
);
596 static struct console pstore_console
= {
598 .write
= pstore_console_write
,
599 .flags
= CON_PRINTBUFFER
| CON_ENABLED
| CON_ANYTIME
,
603 static void pstore_register_console(void)
605 register_console(&pstore_console
);
608 static void pstore_unregister_console(void)
610 unregister_console(&pstore_console
);
613 static void pstore_register_console(void) {}
614 static void pstore_unregister_console(void) {}
617 static int pstore_write_compat(enum pstore_type_id type
,
618 enum kmsg_dump_reason reason
,
619 u64
*id
, unsigned int part
, int count
,
620 bool compressed
, size_t size
,
621 struct pstore_info
*psi
)
623 return psi
->write_buf(type
, reason
, id
, part
, psinfo
->buf
, compressed
,
627 static int pstore_write_buf_user_compat(enum pstore_type_id type
,
628 enum kmsg_dump_reason reason
,
629 u64
*id
, unsigned int part
,
630 const char __user
*buf
,
631 bool compressed
, size_t size
,
632 struct pstore_info
*psi
)
634 unsigned long flags
= 0;
635 size_t i
, bufsize
= size
;
638 if (unlikely(!access_ok(VERIFY_READ
, buf
, size
)))
640 if (bufsize
> psinfo
->bufsize
)
641 bufsize
= psinfo
->bufsize
;
642 spin_lock_irqsave(&psinfo
->buf_lock
, flags
);
643 for (i
= 0; i
< size
; ) {
644 size_t c
= min(size
- i
, bufsize
);
646 ret
= __copy_from_user(psinfo
->buf
, buf
+ i
, c
);
647 if (unlikely(ret
!= 0)) {
651 ret
= psi
->write_buf(type
, reason
, id
, part
, psinfo
->buf
,
653 if (unlikely(ret
< 0))
657 spin_unlock_irqrestore(&psinfo
->buf_lock
, flags
);
658 return unlikely(ret
< 0) ? ret
: size
;
662 * platform specific persistent storage driver registers with
663 * us here. If pstore is already mounted, call the platform
664 * read function right away to populate the file system. If not
665 * then the pstore mount code will call us later to fill out
668 int pstore_register(struct pstore_info
*psi
)
670 struct module
*owner
= psi
->owner
;
672 if (backend
&& strcmp(backend
, psi
->name
))
675 spin_lock(&pstore_lock
);
677 spin_unlock(&pstore_lock
);
682 psi
->write
= pstore_write_compat
;
683 if (!psi
->write_buf_user
)
684 psi
->write_buf_user
= pstore_write_buf_user_compat
;
686 mutex_init(&psinfo
->read_mutex
);
687 spin_unlock(&pstore_lock
);
689 if (owner
&& !try_module_get(owner
)) {
694 allocate_buf_for_compression();
696 if (pstore_is_mounted())
697 pstore_get_records(0);
699 if (psi
->flags
& PSTORE_FLAGS_DMESG
)
700 pstore_register_kmsg();
701 if (psi
->flags
& PSTORE_FLAGS_CONSOLE
)
702 pstore_register_console();
703 if (psi
->flags
& PSTORE_FLAGS_FTRACE
)
704 pstore_register_ftrace();
705 if (psi
->flags
& PSTORE_FLAGS_PMSG
)
706 pstore_register_pmsg();
708 if (pstore_update_ms
>= 0) {
709 pstore_timer
.expires
= jiffies
+
710 msecs_to_jiffies(pstore_update_ms
);
711 add_timer(&pstore_timer
);
715 * Update the module parameter backend, so it is visible
716 * through /sys/module/pstore/parameters/backend
722 pr_info("Registered %s as persistent store backend\n", psi
->name
);
726 EXPORT_SYMBOL_GPL(pstore_register
);
728 void pstore_unregister(struct pstore_info
*psi
)
730 if (psi
->flags
& PSTORE_FLAGS_PMSG
)
731 pstore_unregister_pmsg();
732 if (psi
->flags
& PSTORE_FLAGS_FTRACE
)
733 pstore_unregister_ftrace();
734 if (psi
->flags
& PSTORE_FLAGS_CONSOLE
)
735 pstore_unregister_console();
736 if (psi
->flags
& PSTORE_FLAGS_DMESG
)
737 pstore_unregister_kmsg();
739 free_buf_for_compression();
744 EXPORT_SYMBOL_GPL(pstore_unregister
);
747 * Read all the records from the persistent store. Create
748 * files in our filesystem. Don't warn about -EEXIST errors
749 * when we are re-scanning the backing store looking to add new
752 void pstore_get_records(int quiet
)
754 struct pstore_info
*psi
= psinfo
;
759 enum pstore_type_id type
;
760 struct timespec time
;
763 int unzipped_len
= -1;
764 ssize_t ecc_notice_size
= 0;
769 mutex_lock(&psi
->read_mutex
);
770 if (psi
->open
&& psi
->open(psi
))
773 while ((size
= psi
->read(&id
, &type
, &count
, &time
, &buf
, &compressed
,
774 &ecc_notice_size
, psi
)) > 0) {
775 if (compressed
&& (type
== PSTORE_TYPE_DMESG
)) {
777 unzipped_len
= pstore_decompress(buf
,
781 if (unzipped_len
> 0) {
783 memcpy(big_oops_buf
+ unzipped_len
,
784 buf
+ size
, ecc_notice_size
);
790 pr_err("decompression failed;returned %d\n",
795 rc
= pstore_mkfile(type
, psi
->name
, id
, count
, buf
,
796 compressed
, size
+ ecc_notice_size
,
798 if (unzipped_len
< 0) {
799 /* Free buffer other than big oops */
804 if (rc
&& (rc
!= -EEXIST
|| !quiet
))
810 mutex_unlock(&psi
->read_mutex
);
813 pr_warn("failed to load %d record(s) from '%s'\n",
/* Workqueue body: quietly re-scan the backend for new records. */
static void pstore_dowork(struct work_struct *work)
{
	pstore_get_records(1);
}
822 static void pstore_timefunc(unsigned long dummy
)
824 if (pstore_new_entry
) {
825 pstore_new_entry
= 0;
826 schedule_work(&pstore_work
);
829 mod_timer(&pstore_timer
, jiffies
+ msecs_to_jiffies(pstore_update_ms
));
832 module_param(backend
, charp
, 0444);
833 MODULE_PARM_DESC(backend
, "Pstore backend to use");