/*
 * fs/pstore/platform.c
 *
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
#include <linux/zlib.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#include "internal.h"

/*
 * We defer making "oops" entries appear in pstore - we wait to see
 * whether the system is actually still running well enough for
 * someone to read the entry.
 */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
                 "(default is -1, which means runtime updates are disabled; "
                 "enabling this option is not safe, it may lead to further "
                 "corruption on Oopses)");

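/*
 * Non-zero when a new record has been written to the backend since the
 * last rescan; pstore_timefunc() checks this flag and, via pstore_work,
 * pulls the new records into the filesystem.
 */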
static int pstore_new_entry;

static void pstore_timefunc(unsigned long);
static DEFINE_TIMER(pstore_timer, pstore_timefunc, 0, 0);

static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);

/*
 * pstore_lock just protects "psinfo" during
 * calls to pstore_register()
 */
static DEFINE_SPINLOCK(pstore_lock);
struct pstore_info *psinfo;

static char *backend;

/* Compression parameters */
#define COMPR_LEVEL 6
#define WINDOW_BITS 12
#define MEM_LEVEL 4
static struct z_stream_s stream;

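/*
 * Staging buffer, sized larger than the backend's record buffer, into which
 * an uncompressed dump is assembled before being deflated into psinfo->buf.
 */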
static char *big_oops_buf;
static size_t big_oops_buf_sz;

/* How much of the console log to snapshot */
static unsigned long kmsg_bytes = 10240;

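/* Allow the pstore filesystem code to adjust kmsg_bytes (e.g. from a mount option). */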
void pstore_set_kmsg_bytes(int bytes)
{
        kmsg_bytes = bytes;
}

/* Tag each group of saved records with a sequence number */
static int oopscount;

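/* Map a kmsg_dump_reason onto the human-readable tag used in record headers. */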
static const char *get_reason_str(enum kmsg_dump_reason reason)
{
        switch (reason) {
        case KMSG_DUMP_PANIC:
                return "Panic";
        case KMSG_DUMP_OOPS:
                return "Oops";
        case KMSG_DUMP_EMERG:
                return "Emergency";
        case KMSG_DUMP_RESTART:
                return "Restart";
        case KMSG_DUMP_HALT:
                return "Halt";
        case KMSG_DUMP_POWEROFF:
                return "Poweroff";
        default:
                return "Unknown";
        }
}

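/*
 * Decide whether the dump path may block on psinfo->buf_lock: in NMI
 * context, or on panic/emergency restart, it must not, so the caller
 * falls back to spin_trylock_irqsave().
 */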
bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
{
        /*
         * In case of NMI path, pstore shouldn't be blocked
         * regardless of reason.
         */
        if (in_nmi())
                return true;

        switch (reason) {
        /* In panic case, other cpus are stopped by smp_send_stop(). */
        case KMSG_DUMP_PANIC:
        /* Emergency restart shouldn't be blocked by spin lock. */
        case KMSG_DUMP_EMERG:
                return true;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);

/* Derived from logfs_compress() */
static int pstore_compress(const void *in, void *out, size_t inlen,
                           size_t outlen)
{
        int err, ret;

        ret = -EIO;
        err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
                                MEM_LEVEL, Z_DEFAULT_STRATEGY);
        if (err != Z_OK)
                goto error;

        stream.next_in = in;
        stream.avail_in = inlen;
        stream.total_in = 0;
        stream.next_out = out;
        stream.avail_out = outlen;
        stream.total_out = 0;

        err = zlib_deflate(&stream, Z_FINISH);
        if (err != Z_STREAM_END)
                goto error;

        err = zlib_deflateEnd(&stream);
        if (err != Z_OK)
                goto error;

        if (stream.total_out >= stream.total_in)
                goto error;

        ret = stream.total_out;
error:
        return ret;
}

/* Derived from logfs_uncompress */
static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
{
        int err, ret;

        ret = -EIO;
        err = zlib_inflateInit2(&stream, WINDOW_BITS);
        if (err != Z_OK)
                goto error;

        stream.next_in = in;
        stream.avail_in = inlen;
        stream.total_in = 0;
        stream.next_out = out;
        stream.avail_out = outlen;
        stream.total_out = 0;

        err = zlib_inflate(&stream, Z_FINISH);
        if (err != Z_STREAM_END)
                goto error;

        err = zlib_inflateEnd(&stream);
        if (err != Z_OK)
                goto error;

        ret = stream.total_out;
error:
        return ret;
}

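/*
 * Size big_oops_buf from the backend's record size and an expected
 * compression ratio ("cmpr" percent): big_oops_buf_sz = bufsize * 100 / cmpr.
 * For example, a 4096-byte nvram/erst record with cmpr = 45 yields a
 * 4096 * 100 / 45 = 9102-byte staging buffer, roughly 2.2 times the
 * backend record size.
 */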
static void allocate_buf_for_compression(void)
{
        size_t size;
        size_t cmpr;

        switch (psinfo->bufsize) {
        /* buffer range for efivars */
        case 1000 ... 2000:
                cmpr = 56;
                break;
        case 2001 ... 3000:
                cmpr = 54;
                break;
        case 3001 ... 3999:
                cmpr = 52;
                break;
        /* buffer range for nvram, erst */
        case 4000 ... 10000:
                cmpr = 45;
                break;
        default:
                cmpr = 60;
                break;
        }

        big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
        if (big_oops_buf) {
                size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
                           zlib_inflate_workspacesize());
                stream.workspace = kmalloc(size, GFP_KERNEL);
                if (!stream.workspace) {
                        pr_err("pstore: No memory for compression workspace; "
                               "skipping compression\n");
                        kfree(big_oops_buf);
                        big_oops_buf = NULL;
                }
        } else {
                pr_err("pstore: No memory for uncompressed data; "
                       "skipping compression\n");
                stream.workspace = NULL;
        }
}

/*
 * Called when compression fails. The printk buffer has already been
 * fetched for compression, and fetching it again would advance the
 * printk iterator and return older contents. Instead, copy the most
 * recent messages from big_oops_buf into psinfo->buf.
 */
static size_t copy_kmsg_to_buffer(int hsize, size_t len)
{
        size_t total_len;
        size_t diff;

        total_len = hsize + len;

        if (total_len > psinfo->bufsize) {
                diff = total_len - psinfo->bufsize + hsize;
                memcpy(psinfo->buf, big_oops_buf, hsize);
                memcpy(psinfo->buf + hsize, big_oops_buf + diff,
                       psinfo->bufsize - hsize);
                total_len = psinfo->bufsize;
        } else
                memcpy(psinfo->buf, big_oops_buf, total_len);

        return total_len;
}

/*
 * callback from kmsg_dump. Save as much of the end of the kernel log
 * buffer as we can (up to kmsg_bytes), splitting it into parts that
 * each fit into the backend's record buffer.
 */
static void pstore_dump(struct kmsg_dumper *dumper,
                        enum kmsg_dump_reason reason)
{
        unsigned long total = 0;
        const char *why;
        u64 id;
        unsigned int part = 1;
        unsigned long flags = 0;
        int is_locked = 0;
        int ret;

        why = get_reason_str(reason);

        if (pstore_cannot_block_path(reason)) {
                is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
                if (!is_locked) {
                        pr_err("pstore dump routine blocked in %s path, may corrupt error record\n",
                               in_nmi() ? "NMI" : why);
                }
        } else
                spin_lock_irqsave(&psinfo->buf_lock, flags);
        oopscount++;
        while (total < kmsg_bytes) {
                char *dst;
                unsigned long size;
                int hsize;
                int zipped_len = -1;
                size_t len;
                bool compressed;
                size_t total_len;

                if (big_oops_buf) {
                        dst = big_oops_buf;
                        hsize = sprintf(dst, "%s#%d Part%d\n", why,
                                        oopscount, part);
                        size = big_oops_buf_sz - hsize;

                        if (!kmsg_dump_get_buffer(dumper, true, dst + hsize,
                                                  size, &len))
                                break;

                        zipped_len = pstore_compress(dst, psinfo->buf,
                                                     hsize + len, psinfo->bufsize);

                        if (zipped_len > 0) {
                                compressed = true;
                                total_len = zipped_len;
                        } else {
                                compressed = false;
                                total_len = copy_kmsg_to_buffer(hsize, len);
                        }
                } else {
                        dst = psinfo->buf;
                        hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount,
                                        part);
                        size = psinfo->bufsize - hsize;
                        dst += hsize;

                        if (!kmsg_dump_get_buffer(dumper, true, dst,
                                                  size, &len))
                                break;

                        compressed = false;
                        total_len = hsize + len;
                }

                ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
                                    oopscount, compressed, total_len, psinfo);
                if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
                        pstore_new_entry = 1;

                total += total_len;
                part++;
        }
        if (pstore_cannot_block_path(reason)) {
                if (is_locked)
                        spin_unlock_irqrestore(&psinfo->buf_lock, flags);
        } else
                spin_unlock_irqrestore(&psinfo->buf_lock, flags);
}

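/* kmsg_dump hook through which panic/oops output reaches pstore_dump(). */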
static struct kmsg_dumper pstore_dumper = {
        .dump = pstore_dump,
};

#ifdef CONFIG_PSTORE_CONSOLE
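/*
 * Console hook: copy console output into psinfo->buf in bufsize-sized
 * chunks and hand each chunk to the backend as a PSTORE_TYPE_CONSOLE record.
 */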
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
        const char *e = s + c;

        while (s < e) {
                unsigned long flags;
                u64 id;

                if (c > psinfo->bufsize)
                        c = psinfo->bufsize;

                if (oops_in_progress) {
                        if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
                                break;
                } else {
                        spin_lock_irqsave(&psinfo->buf_lock, flags);
                }
                memcpy(psinfo->buf, s, c);
                psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, 0, 0, c, psinfo);
                spin_unlock_irqrestore(&psinfo->buf_lock, flags);
                s += c;
                c = e - s;
        }
}

static struct console pstore_console = {
        .name   = "pstore",
        .write  = pstore_console_write,
        .flags  = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
        .index  = -1,
};

static void pstore_register_console(void)
{
        register_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
#endif

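/*
 * Default ->write implementation for backends that only provide ->write_buf:
 * the record has already been placed in psinfo->buf, so just forward it.
 */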
static int pstore_write_compat(enum pstore_type_id type,
                               enum kmsg_dump_reason reason,
                               u64 *id, unsigned int part, int count,
                               bool compressed, size_t size,
                               struct pstore_info *psi)
{
        return psi->write_buf(type, reason, id, part, psinfo->buf, compressed,
                              size, psi);
}

/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 *
 * Register with kmsg_dump to save last part of console log on panic.
 */
int pstore_register(struct pstore_info *psi)
{
        struct module *owner = psi->owner;

        if (backend && strcmp(backend, psi->name))
                return -EPERM;

        spin_lock(&pstore_lock);
        if (psinfo) {
                spin_unlock(&pstore_lock);
                return -EBUSY;
        }

        if (!psi->write)
                psi->write = pstore_write_compat;
        psinfo = psi;
        mutex_init(&psinfo->read_mutex);
        spin_unlock(&pstore_lock);

        if (owner && !try_module_get(owner)) {
                psinfo = NULL;
                return -EINVAL;
        }

        allocate_buf_for_compression();

        if (pstore_is_mounted())
                pstore_get_records(0);

        kmsg_dump_register(&pstore_dumper);
        pstore_register_console();
        pstore_register_ftrace();

        if (pstore_update_ms >= 0) {
                pstore_timer.expires = jiffies +
                        msecs_to_jiffies(pstore_update_ms);
                add_timer(&pstore_timer);
        }

        pr_info("pstore: Registered %s as persistent store backend\n",
                psi->name);

        return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);
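
/*
 * Rough usage sketch (illustrative only, not part of this file): a backend
 * supplies its own record buffer plus read/write callbacks and then calls
 * pstore_register(). The names my_buf, my_open, my_read and my_write_buf
 * below are hypothetical placeholders, not existing kernel symbols.
 *
 *      static char my_buf[4096];
 *
 *      static struct pstore_info my_psinfo = {
 *              .owner     = THIS_MODULE,
 *              .name      = "mybackend",
 *              .buf       = my_buf,
 *              .bufsize   = sizeof(my_buf),
 *              .open      = my_open,
 *              .read      = my_read,
 *              .write_buf = my_write_buf,
 *      };
 *
 *      pstore_register(&my_psinfo);
 *
 * If ->write is left NULL, pstore_write_compat() above is installed so that
 * providing ->write_buf is sufficient.
 */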

/*
 * Read all the records from the persistent store. Create
 * files in our filesystem. Don't warn about -EEXIST errors
 * when we are re-scanning the backing store looking to add new
 * error records.
 */
void pstore_get_records(int quiet)
{
        struct pstore_info *psi = psinfo;
        char *buf = NULL;
        ssize_t size;
        u64 id;
        int count;
        enum pstore_type_id type;
        struct timespec time;
        int failed = 0, rc;
        bool compressed;
        int unzipped_len = -1;

        if (!psi)
                return;

        mutex_lock(&psi->read_mutex);
        if (psi->open && psi->open(psi))
                goto out;

        while ((size = psi->read(&id, &type, &count, &time, &buf, &compressed,
                                 psi)) > 0) {
                if (compressed && (type == PSTORE_TYPE_DMESG)) {
                        if (big_oops_buf)
                                unzipped_len = pstore_decompress(buf,
                                                                 big_oops_buf, size,
                                                                 big_oops_buf_sz);

                        if (unzipped_len > 0) {
                                buf = big_oops_buf;
                                size = unzipped_len;
                                compressed = false;
                        } else {
                                pr_err("pstore: decompression failed; "
                                       "returned %d\n", unzipped_len);
                                compressed = true;
                        }
                }
                rc = pstore_mkfile(type, psi->name, id, count, buf,
                                   compressed, (size_t)size, time, psi);
                if (unzipped_len < 0) {
                        /* Free the record buffer unless it points at big_oops_buf */
                        kfree(buf);
                        buf = NULL;
                } else
                        unzipped_len = -1;
                if (rc && (rc != -EEXIST || !quiet))
                        failed++;
        }
        if (psi->close)
                psi->close(psi);
out:
        mutex_unlock(&psi->read_mutex);

        if (failed)
                printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
                       failed, psi->name);
}

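/* Workqueue callback: rescan the backend for new records (quietly ignoring -EEXIST). */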
static void pstore_dowork(struct work_struct *work)
{
        pstore_get_records(1);
}

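/*
 * Periodic timer (every pstore_update_ms): if a new entry was flagged,
 * kick the work item, then re-arm the timer.
 */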
static void pstore_timefunc(unsigned long dummy)
{
        if (pstore_new_entry) {
                pstore_new_entry = 0;
                schedule_work(&pstore_work);
        }

        mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
}

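/* When "backend=" is set, only the backend with that name may register. */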
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");