/*
 * nmi.c - Safe printk in NMI context
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * via the @printk_func per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * This is useful when we need to be sure that all buffers have been
 * handled, or when IRQs are blocked and the IRQ work cannot run.
 */
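/*
 * Rough data flow (a summary of the description above):
 *
 *   NMI:  printk() -> this_cpu printk_func == vprintk_nmi()
 *                  -> append to the per-CPU nmi_print_seq buffer
 *                  -> irq_work_queue()
 *   IRQ:  __printk_nmi_flush() -> main ring buffer (consoles later)
 */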
DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
static int printk_nmi_irq_ready;
atomic_t nmi_message_lost;

#define NMI_LOG_BUF_LEN	((1 << CONFIG_NMI_LOG_BUF_SHIFT) -	\
			 sizeof(atomic_t) - sizeof(struct irq_work))
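/*
 * The two sizeof() terms are subtracted so that the whole struct
 * nmi_seq_buf below stays close to (1 << CONFIG_NMI_LOG_BUF_SHIFT)
 * bytes; compiler padding may still make it slightly larger.
 */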

struct nmi_seq_buf {
	atomic_t		len;	/* length of written data */
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[NMI_LOG_BUF_LEN];
};
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
54 | ||
55 | /* | |
56 | * Safe printk() for NMI context. It uses a per-CPU buffer to | |
57 | * store the message. NMIs are not nested, so there is always only | |
58 | * one writer running. But the buffer might get flushed from another | |
59 | * CPU, so we need to be careful. | |
60 | */ | |
static int vprintk_nmi(int level, const char *fmt, va_list args)
{
	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
	int add = 0;
	size_t len;

again:
	len = atomic_read(&s->len);

	if (len >= sizeof(s->buffer)) {
		atomic_inc(&nmi_message_lost);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	if (level != LOGLEVEL_DEFAULT) {
		add = snprintf(s->buffer + len, sizeof(s->buffer) - len,
			       KERN_SOH "%c", '0' + level);
		add += vsnprintf(s->buffer + len + add,
				 sizeof(s->buffer) - len - add,
				 fmt, args);
	} else {
		add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len,
				fmt, args);
	}

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	/* Get flushed in a safer context. */
	if (add && printk_nmi_irq_ready) {
		/* Make sure that IRQ work is really initialized. */
		smp_rmb();
		irq_work_queue(&s->work);
	}

	return add;
}
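
/*
 * Worked example of the retry in vprintk_nmi() (illustrative, not from
 * the original source): a writer reads len == 100 and formats its
 * message at buffer + 100. Meanwhile __printk_nmi_flush() copies the
 * data out and resets len to 0. The writer's
 * atomic_cmpxchg(&s->len, 100, 100 + add) then fails, so it retries
 * and re-formats the message at the new offset; bytes written at a
 * stale position are never published.
 */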
110 | ||
111 | /* | |
112 | * printk one line from the temporary buffer from @start index until | |
113 | * and including the @end index. | |
114 | */ | |
115 | static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end) | |
116 | { | |
117 | const char *buf = s->buffer + start; | |
118 | ||
	/*
	 * The buffers are flushed in NMI only on panic. The messages must
	 * go only into the ring buffer at this stage. Consoles will get
	 * explicitly called later when a crashdump is not generated.
	 */
	if (in_nmi())
		printk_deferred("%.*s", (end - start) + 1, buf);
	else
		printk("%.*s", (end - start) + 1, buf);

}
130 | ||
131 | /* | |
132 | * Flush data from the associated per_CPU buffer. The function | |
133 | * can be called either via IRQ work or independently. | |
134 | */ | |
135 | static void __printk_nmi_flush(struct irq_work *work) | |
136 | { | |
137 | static raw_spinlock_t read_lock = | |
138 | __RAW_SPIN_LOCK_INITIALIZER(read_lock); | |
139 | struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work); | |
140 | unsigned long flags; | |
141 | size_t len, size; | |
142 | int i, last_i; | |
143 | ||
144 | /* | |
145 | * The lock has two functions. First, one reader has to flush all | |
146 | * available message to make the lockless synchronization with | |
147 | * writers easier. Second, we do not want to mix messages from | |
148 | * different CPUs. This is especially important when printing | |
149 | * a backtrace. | |
150 | */ | |
151 | raw_spin_lock_irqsave(&read_lock, flags); | |
152 | ||
	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase.
	 */
	if (i && i >= len)
		pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
		       i, len);

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to the @len */
	smp_rmb();

	size = min(len, sizeof(s->buffer));
	last_i = i;

	/* Print line by line. */
	for (; i < size; i++) {
		if (s->buffer[i] == '\n') {
			print_nmi_seq_line(s, last_i, i);
			last_i = i + 1;
		}
	}
	/* Check if there was a partial line. */
	if (last_i < size) {
		print_nmi_seq_line(s, last_i, size - 1);
		pr_cont("\n");
	}

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
200 | ||
201 | /** | |
202 | * printk_nmi_flush - flush all per-cpu nmi buffers. | |
203 | * | |
204 | * The buffers are flushed automatically via IRQ work. This function | |
205 | * is useful only when someone wants to be sure that all buffers have | |
206 | * been flushed at some point. | |
207 | */ | |
208 | void printk_nmi_flush(void) | |
209 | { | |
210 | int cpu; | |
211 | ||
212 | for_each_possible_cpu(cpu) | |
213 | __printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work); | |
214 | } | |
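
/*
 * Illustrative use (an assumption about callers, not part of this
 * file): code that sends NMIs to other CPUs to dump their backtraces
 * can wait for all handlers to finish and then call printk_nmi_flush(),
 * so the captured traces reach the main ring buffer even before the
 * per-CPU IRQ work has had a chance to run.
 */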
215 | ||
cf9b1106 PM |
216 | /** |
217 | * printk_nmi_flush_on_panic - flush all per-cpu nmi buffers when the system | |
218 | * goes down. | |
219 | * | |
220 | * Similar to printk_nmi_flush() but it can be called even in NMI context when | |
221 | * the system goes down. It does the best effort to get NMI messages into | |
222 | * the main ring buffer. | |
223 | * | |
224 | * Note that it could try harder when there is only one CPU online. | |
225 | */ | |
226 | void printk_nmi_flush_on_panic(void) | |
227 | { | |
228 | /* | |
229 | * Make sure that we could access the main ring buffer. | |
230 | * Do not risk a double release when more CPUs are up. | |
231 | */ | |
232 | if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) { | |
233 | if (num_online_cpus() > 1) | |
234 | return; | |
235 | ||
236 | debug_locks_off(); | |
237 | raw_spin_lock_init(&logbuf_lock); | |
238 | } | |
239 | ||
240 | printk_nmi_flush(); | |
241 | } | |
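
/*
 * Why the num_online_cpus() check above matters (added explanation):
 * with other CPUs online, one of them may legitimately hold logbuf_lock
 * and still be writing; re-initializing the lock would hand it to a
 * second owner and corrupt the ring buffer. With a single CPU online,
 * a locked logbuf_lock can only mean this CPU was interrupted by the
 * NMI while holding it, so breaking the lock is the only way forward.
 */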
242 | ||
42a0bb3f PM |
243 | void __init printk_nmi_init(void) |
244 | { | |
245 | int cpu; | |
246 | ||
247 | for_each_possible_cpu(cpu) { | |
248 | struct nmi_seq_buf *s = &per_cpu(nmi_print_seq, cpu); | |
249 | ||
250 | init_irq_work(&s->work, __printk_nmi_flush); | |
251 | } | |
252 | ||
253 | /* Make sure that IRQ works are initialized before enabling. */ | |
254 | smp_wmb(); | |
255 | printk_nmi_irq_ready = 1; | |
256 | ||
257 | /* Flush pending messages that did not have scheduled IRQ works. */ | |
258 | printk_nmi_flush(); | |
259 | } | |
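
/*
 * The smp_wmb() in printk_nmi_init() pairs with the smp_rmb() in
 * vprintk_nmi(): a writer that observes printk_nmi_irq_ready != 0 is
 * guaranteed to also observe the initialized IRQ work, which makes
 * irq_work_queue() safe to call.
 */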
260 | ||
261 | void printk_nmi_enter(void) | |
262 | { | |
263 | this_cpu_write(printk_func, vprintk_nmi); | |
264 | } | |
265 | ||
266 | void printk_nmi_exit(void) | |
267 | { | |
268 | this_cpu_write(printk_func, vprintk_default); | |
269 | } |