printk/nmi: warn when some message has been lost in NMI context
kernel/printk/nmi.c
/*
 * nmi.c - Safe printk in NMI context
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * via the @printk_func per-CPU variable.
 *
 * The implementation also allows flushing the strings from another
 * CPU. This is useful when we need to make sure that all buffers
 * have been handled, or when IRQs are blocked.
 */
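
/*
 * For orientation, a simplified sketch of the caller side (not the
 * verbatim printk.c code): printk() picks the active implementation
 * through the per-CPU function pointer, so the NMI redirection is
 * invisible to callers:
 *
 *	va_start(args, fmt);
 *	preempt_disable();
 *	r = this_cpu_read(printk_func)(fmt, args);
 *	preempt_enable();
 *	va_end(args);
 */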
DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
static int printk_nmi_irq_ready;
atomic_t nmi_message_lost;

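/*
 * The buffer size is chosen so that the whole struct nmi_seq_buf below
 * fits into a single 4kB page (ignoring any padding): the atomic length,
 * the embedded IRQ work item, and the text buffer itself.
 */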
#define NMI_LOG_BUF_LEN	(4096 - sizeof(atomic_t) - sizeof(struct irq_work))

struct nmi_seq_buf {
	atomic_t	len;	/* length of written data */
	struct irq_work	work;	/* IRQ work that flushes the buffer */
	unsigned char	buffer[NMI_LOG_BUF_LEN];
};
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static int vprintk_nmi(const char *fmt, va_list args)
{
	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
	int add = 0;
	size_t len;

again:
	len = atomic_read(&s->len);

	if (len >= sizeof(s->buffer)) {
		atomic_inc(&nmi_message_lost);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	/* Get flushed in a more safe context. */
	if (add && printk_nmi_irq_ready) {
		/* Make sure that IRQ work is really initialized. */
		smp_rmb();
		irq_work_queue(&s->work);
	}

	return add;
}
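
/*
 * A concrete interleaving that the cmpxchg retry above handles: the
 * writer reads len == L; another CPU flushes the buffer and truncates
 * len to 0; the writer's cmpxchg(&s->len, L, L + add) then fails, so
 * the writer restarts and redoes the vsnprintf() at the new offset 0
 * instead of publishing data at a stale position.
 */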

/*
 * printk() one line from the temporary buffer, from the @start index
 * up to and including the @end index.
 */
static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
{
	const char *buf = s->buffer + start;

	printk("%.*s", (end - start) + 1, buf);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_nmi_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work);
	unsigned long flags;
	size_t len, size;
	int i, last_i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase.
	 */
	if (i && i >= len)
		pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
		       i, len);

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();

	size = min(len, sizeof(s->buffer));
	last_i = i;

	/* Print line by line. */
	for (; i < size; i++) {
		if (s->buffer[i] == '\n') {
			print_nmi_seq_line(s, last_i, i);
			last_i = i + 1;
		}
	}
	/* Check if there was a partial line. */
	if (last_i < size) {
		print_nmi_seq_line(s, last_i, size - 1);
		pr_cont("\n");
	}

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_nmi_flush - flush all per-CPU NMI buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_nmi_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		__printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work);
}
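
/*
 * Illustrative example (not code from this file): a panic-style path
 * that wants every buffered NMI message in the main log before the
 * system stops could simply do:
 *
 *	pr_emerg("stopping CPUs\n");
 *	printk_nmi_flush();
 */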

void __init printk_nmi_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct nmi_seq_buf *s = &per_cpu(nmi_print_seq, cpu);

		init_irq_work(&s->work, __printk_nmi_flush);
	}

	/* Make sure that IRQ works are initialized before enabling. */
	smp_wmb();
	printk_nmi_irq_ready = 1;

	/* Flush pending messages that could not schedule their IRQ work. */
	printk_nmi_flush();
}

void printk_nmi_enter(void)
{
	this_cpu_write(printk_func, vprintk_nmi);
}

void printk_nmi_exit(void)
{
	this_cpu_write(printk_func, vprintk_default);
}
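
/*
 * These hooks are expected to be called from the generic NMI entry/exit
 * path; they are wired into the nmi_enter()/nmi_exit() macros in
 * <linux/hardirq.h>, roughly (a simplified sketch, not the verbatim
 * macro):
 *
 *	#define nmi_enter()			\
 *		do {				\
 *			printk_nmi_enter();	\
 *			...			\
 *		} while (0)
 */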