/*
 * nmi.c - Safe printk in NMI context
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"	/* printk_func_t, vprintk_default */
/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * via the @printk_func per-CPU variable; see the illustrative
 * dispatch sketch below the definitions.
 *
 * The implementation also allows flushing the strings from another CPU.
 * There are situations when we want to make sure that all buffers
 * have been handled or when IRQs are blocked.
 */
DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
static int printk_nmi_irq_ready;
atomic_t nmi_message_lost;
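/*
 * Illustrative sketch, not part of this file: printk() is expected to
 * dispatch through the @printk_func per-CPU pointer roughly as below,
 * so that a caller in NMI context transparently ends up in vprintk_nmi().
 * The function name is hypothetical; see printk() in printk.c for the
 * real dispatch.
 */
#if 0
static int example_printk_dispatch(const char *fmt, ...)
{
	printk_func_t vprintk_func;
	va_list args;
	int r;

	va_start(args, fmt);
	/* Disable preemption so the per-CPU read and the call stay on one CPU. */
	preempt_disable();
	vprintk_func = this_cpu_read(printk_func);
	r = vprintk_func(fmt, args);
	preempt_enable();
	va_end(args);

	return r;
}
#endif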
#define NMI_LOG_BUF_LEN ((1 << CONFIG_NMI_LOG_BUF_SHIFT) - \
			 sizeof(atomic_t) - sizeof(struct irq_work))
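/*
 * Note: with this definition, one struct nmi_seq_buf below occupies
 * roughly 1 << CONFIG_NMI_LOG_BUF_SHIFT bytes per CPU: the two
 * bookkeeping members plus the message buffer, ignoring padding.
 */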
struct nmi_seq_buf {
	atomic_t	len;	/* length of written data */
	struct irq_work	work;	/* IRQ work that flushes the buffer */
	unsigned char	buffer[NMI_LOG_BUF_LEN];
};
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static int vprintk_nmi(const char *fmt, va_list args)
{
	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
	int add = 0;
	size_t len;

again:
	len = atomic_read(&s->len);

	/* The buffer is full: count the lost message and give up. */
	if (len >= sizeof(s->buffer)) {
		atomic_inc(&nmi_message_lost);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	/* Get flushed in a safer context. */
	if (add && printk_nmi_irq_ready) {
		/* Make sure that the IRQ work is really initialized. */
		smp_rmb();
		irq_work_queue(&s->work);
	}

	return add;
}
/*
 * printk one line from the temporary buffer from @start index until
 * and including the @end index.
 */
static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
{
	const char *buf = s->buffer + start;

	printk("%.*s", (end - start) + 1, buf);
}
/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_nmi_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work);
	unsigned long flags;
	size_t len, size;
	int i, last_i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer an unexpected way. If we printed something then
	 * @len must only increase.
	 */
	if (i && i >= len) {
		pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
		       i, len);
		goto out;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to the @len */
	smp_rmb();

	size = min(len, sizeof(s->buffer));
	last_i = i;

	/* Print line by line. */
	for (; i < size; i++) {
		if (s->buffer[i] == '\n') {
			print_nmi_seq_line(s, last_i, i);
			last_i = i + 1;
		}
	}
	/* Check if there was a partial line. */
	if (last_i < size) {
		print_nmi_seq_line(s, last_i, size - 1);
		pr_cont("\n");
	}

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
/**
 * printk_nmi_flush - flush all per-cpu nmi buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_nmi_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		__printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work);
}
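/*
 * Illustrative sketch, not part of this file: a typical user is an
 * all-CPU backtrace path that makes the other CPUs print from NMI
 * context and then wants their output in the main log right away.
 * example_dump_all_cpus() is hypothetical; trigger_all_cpu_backtrace()
 * is the existing helper from <linux/nmi.h>.
 */
#if 0
static void example_dump_all_cpus(void)
{
	/* The NMI handlers print their backtraces into the per-CPU buffers. */
	trigger_all_cpu_backtrace();

	/* Push everything into the main ring buffer without waiting for IRQ works. */
	printk_nmi_flush();
}
#endif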
void __init printk_nmi_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct nmi_seq_buf *s = &per_cpu(nmi_print_seq, cpu);

		init_irq_work(&s->work, __printk_nmi_flush);
	}

	/* Make sure that IRQ works are initialized before enabling. */
	smp_wmb();
	printk_nmi_irq_ready = 1;

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_nmi_flush();
}
void printk_nmi_enter(void)
{
	this_cpu_write(printk_func, vprintk_nmi);
}
void printk_nmi_exit(void)
{
	this_cpu_write(printk_func, vprintk_default);
}
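/*
 * Illustrative sketch, not part of this file: the NMI entry path is
 * expected to bracket handlers with the two helpers above (typically
 * via nmi_enter()/nmi_exit()), so that any printk() issued while the
 * handler runs goes through vprintk_nmi(). example_nmi_handler() is
 * hypothetical.
 */
#if 0
static void example_nmi_handler(void)
{
	printk_nmi_enter();		/* redirect printk() to the per-CPU buffer */
	pr_warn("NMI received\n");	/* stored in nmi_print_seq, flushed later */
	printk_nmi_exit();		/* restore vprintk_default */
}
#endif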