printk() - do not merge continuation lines of different threads
1 /*
2 * linux/kernel/printk.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Modified to make sys_syslog() more flexible: added commands to
7 * return the last 4k of kernel messages, regardless of whether
8 * they've been read or not. Added option to suppress kernel printk's
9 * to the console. Added hook for sending the console messages
10 * elsewhere, in preparation for a serial line console (someday).
11 * Ted Ts'o, 2/11/93.
12 * Modified for sysctl support, 1/8/97, Chris Horn.
13 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
14 * manfred@colorfullife.com
15 * Rewrote bits to get rid of console_lock
16 * 01Mar01 Andrew Morton
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/tty.h>
22 #include <linux/tty_driver.h>
23 #include <linux/console.h>
24 #include <linux/init.h>
25 #include <linux/jiffies.h>
26 #include <linux/nmi.h>
27 #include <linux/module.h>
28 #include <linux/moduleparam.h>
29 #include <linux/interrupt.h> /* For in_interrupt() */
30 #include <linux/delay.h>
31 #include <linux/smp.h>
32 #include <linux/security.h>
33 #include <linux/bootmem.h>
34 #include <linux/memblock.h>
35 #include <linux/syscalls.h>
36 #include <linux/kexec.h>
37 #include <linux/kdb.h>
38 #include <linux/ratelimit.h>
39 #include <linux/kmsg_dump.h>
40 #include <linux/syslog.h>
41 #include <linux/cpu.h>
42 #include <linux/notifier.h>
43 #include <linux/rculist.h>
44 #include <linux/poll.h>
45
46 #include <asm/uaccess.h>
47
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/printk.h>
50
51 /*
52 * Architectures can override it:
53 */
54 void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
55 {
56 }
57
58 /* printk's without a loglevel use this.. */
59 #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
60
61 /* We show everything that is MORE important than this.. */
62 #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
63 #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
64
65 DECLARE_WAIT_QUEUE_HEAD(log_wait);
66
67 int console_printk[4] = {
68 DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
69 DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
70 MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
71 DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
72 };
73
74 /*
75 * Low level drivers may need that to know if they can schedule in
76 * their unblank() callback or not. So let's export it.
77 */
78 int oops_in_progress;
79 EXPORT_SYMBOL(oops_in_progress);
80
81 /*
82 * console_sem protects the console_drivers list, and also
83 * provides serialisation for access to the entire console
84 * driver system.
85 */
86 static DEFINE_SEMAPHORE(console_sem);
87 struct console *console_drivers;
88 EXPORT_SYMBOL_GPL(console_drivers);
89
90 /*
91 * This is used for debugging the mess that is the VT code by
92 * keeping track of whether we have the console semaphore held. It's
93 * definitely not the perfect debug tool (we don't know if _WE_
94 * hold it and are racing), but it helps tracking those weird code
95 * paths in the console code where we end up in places I want
96 * locked without the console semaphore held.
97 */
98 static int console_locked, console_suspended;
99
100 /*
101 * If exclusive_console is non-NULL then only this console is to be printed to.
102 */
103 static struct console *exclusive_console;
104
105 /*
106 * Array of consoles built from command line options (console=)
107 */
108 struct console_cmdline
109 {
110 char name[8]; /* Name of the driver */
111 int index; /* Minor dev. to use */
112 char *options; /* Options for the driver */
113 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
114 char *brl_options; /* Options for braille driver */
115 #endif
116 };
117
118 #define MAX_CMDLINECONSOLES 8
119
120 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
121 static int selected_console = -1;
122 static int preferred_console = -1;
123 int console_set_on_cmdline;
124 EXPORT_SYMBOL(console_set_on_cmdline);
125
126 /* Flag: console code may call schedule() */
127 static int console_may_schedule;
128
129 /*
130 * The printk log buffer consists of a chain of concatenated variable
131 * length records. Every record starts with a record header, containing
132 * the overall length of the record.
133 *
134 * The heads to the first and last entry in the buffer, as well as the
135 * sequence numbers of both these entries, are maintained when messages
136 * are stored.
137 *
138 * If the heads indicate available messages, the length in the header
139 * tells the start of the next message. A length == 0 for the next message
140 * indicates a wrap-around to the beginning of the buffer.
141 *
142 * Every record carries the monotonic timestamp in nanoseconds, as well as
143 * the standard userspace syslog level and syslog facility. The usual
144 * kernel messages use LOG_KERN; userspace-injected messages always carry
145 * a matching syslog facility, by default LOG_USER. The origin of every
146 * message can be reliably determined that way.
147 *
148 * The human readable log message directly follows the message header. The
149 * length of the message text is stored in the header, the stored message
150 * is not terminated.
151 *
152 * Optionally, a message can carry a dictionary of properties (key/value pairs),
153 * to provide userspace with a machine-readable message context.
154 *
155 * Examples for well-defined, commonly used property names are:
156 * DEVICE=b12:8 device identifier
157 * b12:8 block dev_t
158 * c127:3 char dev_t
159 * n8 netdev ifindex
160 * +sound:card0 subsystem:devname
161 * SUBSYSTEM=pci driver-core subsystem name
162 *
163 * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
164 * follows directly after a '=' character. Every property is terminated by
165 * a '\0' character. The last property is not terminated.
166 *
167 * Example of a message structure:
168 * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec
169 * 0008 34 00 record is 52 bytes long
170 * 000a 0b 00 text is 11 bytes long
171 * 000c 1f 00 dictionary is 23 bytes long
172 * 000e 03 00 LOG_KERN (facility) LOG_ERR (level)
173 * 0010 69 74 27 73 20 61 20 6c "it's a l"
174 * 69 6e 65 "ine"
175 * 001b 44 45 56 49 43 "DEVIC"
176 * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D"
177 * 52 49 56 45 52 3d 62 75 "RIVER=bu"
178 * 67 "g"
179 * 0032 00 00 00 padding to next message header
180 *
181 * The 'struct log' buffer header must never be directly exported to
182 * userspace, it is a kernel-private implementation detail that might
183 * need to be changed in the future, when the requirements change.
184 *
185 * /dev/kmsg exports the structured data in the following line format:
186 * "level,seqnum,timestamp;<message text>\n"
187 *
188 * The optional key/value pairs are attached as continuation lines starting
189 * with a space character and terminated by a newline. All possible
190 * non-printable characters are escaped in the "\xff" notation.
191 *
192 * Users of the export format should ignore possible additional values
193 * separated by ',', and find the message after the ';' character.
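 *
 * As an illustration (the sequence number and timestamp are made up), the
 * example record above would be exported through /dev/kmsg roughly as:
 *   "3,42,4563255;it's a line\n DEVICE=b8:2\n DRIVER=bug\n"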
194 */
195
196 struct log {
197 u64 ts_nsec; /* timestamp in nanoseconds */
198 u16 len; /* length of entire record */
199 u16 text_len; /* length of text buffer */
200 u16 dict_len; /* length of dictionary buffer */
201 u16 level; /* syslog level + facility */
202 };
203
204 /*
205 * The logbuf_lock protects the kmsg buffer, indices, and counters. It is
206 * also used in interesting ways to provide interlocking in console_unlock().
207 */
208 static DEFINE_RAW_SPINLOCK(logbuf_lock);
209
210 /* the next printk record to read by syslog(READ) or /proc/kmsg */
211 static u64 syslog_seq;
212 static u32 syslog_idx;
213
214 /* index and sequence number of the first record stored in the buffer */
215 static u64 log_first_seq;
216 static u32 log_first_idx;
217
218 /* index and sequence number of the next record to store in the buffer */
219 static u64 log_next_seq;
220 #ifdef CONFIG_PRINTK
221 static u32 log_next_idx;
222
223 /* the next printk record to read after the last 'clear' command */
224 static u64 clear_seq;
225 static u32 clear_idx;
226
227 #define LOG_LINE_MAX 1024
228
229 /* record buffer */
230 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
231 static char __log_buf[__LOG_BUF_LEN];
232 static char *log_buf = __log_buf;
233 static u32 log_buf_len = __LOG_BUF_LEN;
234
235 /* cpu currently holding logbuf_lock */
236 static volatile unsigned int logbuf_cpu = UINT_MAX;
237
238 /* human readable text of the record */
239 static char *log_text(const struct log *msg)
240 {
241 return (char *)msg + sizeof(struct log);
242 }
243
244 /* optional key/value pair dictionary attached to the record */
245 static char *log_dict(const struct log *msg)
246 {
247 return (char *)msg + sizeof(struct log) + msg->text_len;
248 }
249
250 /* get record by index; idx must point to valid msg */
251 static struct log *log_from_idx(u32 idx)
252 {
253 struct log *msg = (struct log *)(log_buf + idx);
254
255 /*
256 * A length == 0 record is the end of buffer marker. Wrap around and
257 * read the message at the start of the buffer.
258 */
259 if (!msg->len)
260 return (struct log *)log_buf;
261 return msg;
262 }
263
264 /* get next record; idx must point to valid msg */
265 static u32 log_next(u32 idx)
266 {
267 struct log *msg = (struct log *)(log_buf + idx);
268
269 /* length == 0 indicates the end of the buffer; wrap */
270 /*
271 * A length == 0 record is the end of buffer marker. Wrap around and
272 * read the message at the start of the buffer as *this* one, and
273 * return the one after that.
274 */
275 if (!msg->len) {
276 msg = (struct log *)log_buf;
277 return msg->len;
278 }
279 return idx + msg->len;
280 }
281
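/*
 * Alignment of each record in the buffer: presumably 8 bytes on 64-bit
 * machines that cannot do efficient unaligned accesses, so that the u64
 * ts_nsec in struct log can be loaded directly; 4 bytes otherwise.
 */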
282 #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
283 #define LOG_ALIGN 4
284 #else
285 #define LOG_ALIGN 8
286 #endif
287
288 /* insert record into the buffer, discard old ones, update heads */
289 static void log_store(int facility, int level,
290 const char *dict, u16 dict_len,
291 const char *text, u16 text_len)
292 {
293 struct log *msg;
294 u32 size, pad_len;
295
296 /* number of '\0' padding bytes to next message */
297 size = sizeof(struct log) + text_len + dict_len;
298 pad_len = (-size) & (LOG_ALIGN - 1);
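	/* pad_len is the distance from size up to the next LOG_ALIGN boundary */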
299 size += pad_len;
300
301 while (log_first_seq < log_next_seq) {
302 u32 free;
303
304 if (log_next_idx > log_first_idx)
305 free = max(log_buf_len - log_next_idx, log_first_idx);
306 else
307 free = log_first_idx - log_next_idx;
308
309 if (free > size + sizeof(struct log))
310 break;
311
312 /* drop old messages until we have enough contiguous space */
313 log_first_idx = log_next(log_first_idx);
314 log_first_seq++;
315 }
316
317 if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
318 /*
319 * This message + an additional empty header does not fit
320 * at the end of the buffer. Add an empty header with len == 0
321 * to signify a wrap around.
322 */
323 memset(log_buf + log_next_idx, 0, sizeof(struct log));
324 log_next_idx = 0;
325 }
326
327 /* fill message */
328 msg = (struct log *)(log_buf + log_next_idx);
329 memcpy(log_text(msg), text, text_len);
330 msg->text_len = text_len;
331 memcpy(log_dict(msg), dict, dict_len);
332 msg->dict_len = dict_len;
333 msg->level = (facility << 3) | (level & 7);
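	/* syslog-style encoding: facility in the upper bits, severity in the low 3 */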
334 msg->ts_nsec = local_clock();
335 memset(log_dict(msg) + dict_len, 0, pad_len);
336 msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
337
338 /* insert message */
339 log_next_idx += msg->len;
340 log_next_seq++;
341 }
342
343 /* /dev/kmsg - userspace message inject/listen interface */
344 struct devkmsg_user {
345 u64 seq;
346 u32 idx;
347 struct mutex lock;
348 char buf[8192];
349 };
350
351 static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
352 unsigned long count, loff_t pos)
353 {
354 char *buf, *line;
355 int i;
356 int level = default_message_loglevel;
357 int facility = 1; /* LOG_USER */
358 size_t len = iov_length(iv, count);
359 ssize_t ret = len;
360
361 if (len > LOG_LINE_MAX)
362 return -EINVAL;
363 buf = kmalloc(len+1, GFP_KERNEL);
364 if (buf == NULL)
365 return -ENOMEM;
366
367 line = buf;
368 for (i = 0; i < count; i++) {
369 if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len))
370 goto out;
371 line += iv[i].iov_len;
372 }
373
374 /*
375 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
376 * the decimal value represents a 32-bit quantity: the lower 3 bits are
377 * the log level, the remaining bits are the log facility.
378 *
379 * If no prefix or no userspace facility is specified, we
380 * enforce LOG_USER, to be able to reliably distinguish
381 * kernel-generated messages from userspace-injected ones.
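 *
 * For example, a write of "<12>hello\n" is stored with facility 1
 * (LOG_USER) and severity 4, since 12 == (1 << 3) | 4.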
382 */
383 line = buf;
384 if (line[0] == '<') {
385 char *endp = NULL;
386
387 i = simple_strtoul(line+1, &endp, 10);
388 if (endp && endp[0] == '>') {
389 level = i & 7;
390 if (i >> 3)
391 facility = i >> 3;
392 endp++;
393 len -= endp - line;
394 line = endp;
395 }
396 }
397 line[len] = '\0';
398
399 printk_emit(facility, level, NULL, 0, "%s", line);
400 out:
401 kfree(buf);
402 return ret;
403 }
404
405 static ssize_t devkmsg_read(struct file *file, char __user *buf,
406 size_t count, loff_t *ppos)
407 {
408 struct devkmsg_user *user = file->private_data;
409 struct log *msg;
410 u64 ts_usec;
411 size_t i;
412 size_t len;
413 ssize_t ret;
414
415 if (!user)
416 return -EBADF;
417
418 mutex_lock(&user->lock);
419 raw_spin_lock(&logbuf_lock);
420 while (user->seq == log_next_seq) {
421 if (file->f_flags & O_NONBLOCK) {
422 ret = -EAGAIN;
423 raw_spin_unlock(&logbuf_lock);
424 goto out;
425 }
426
427 raw_spin_unlock(&logbuf_lock);
428 ret = wait_event_interruptible(log_wait,
429 user->seq != log_next_seq);
430 if (ret)
431 goto out;
432 raw_spin_lock(&logbuf_lock);
433 }
434
435 if (user->seq < log_first_seq) {
436 /* our last seen message is gone, return error and reset */
437 user->idx = log_first_idx;
438 user->seq = log_first_seq;
439 ret = -EPIPE;
440 raw_spin_unlock(&logbuf_lock);
441 goto out;
442 }
443
444 msg = log_from_idx(user->idx);
445 ts_usec = msg->ts_nsec;
446 do_div(ts_usec, 1000);
447 len = sprintf(user->buf, "%u,%llu,%llu;",
448 msg->level, user->seq, ts_usec);
449
450 /* escape non-printable characters */
451 for (i = 0; i < msg->text_len; i++) {
452 char c = log_text(msg)[i];
453
454 if (c < ' ' || c >= 128)
455 len += sprintf(user->buf + len, "\\x%02x", c);
456 else
457 user->buf[len++] = c;
458 }
459 user->buf[len++] = '\n';
460
461 if (msg->dict_len) {
462 bool line = true;
463
464 for (i = 0; i < msg->dict_len; i++) {
465 char c = log_dict(msg)[i];
466
467 if (line) {
468 user->buf[len++] = ' ';
469 line = false;
470 }
471
472 if (c == '\0') {
473 user->buf[len++] = '\n';
474 line = true;
475 continue;
476 }
477
478 if (c < ' ' || c >= 128) {
479 len += sprintf(user->buf + len, "\\x%02x", c);
480 continue;
481 }
482
483 user->buf[len++] = c;
484 }
485 user->buf[len++] = '\n';
486 }
487
488 user->idx = log_next(user->idx);
489 user->seq++;
490 raw_spin_unlock(&logbuf_lock);
491
492 if (len > count) {
493 ret = -EINVAL;
494 goto out;
495 }
496
497 if (copy_to_user(buf, user->buf, len)) {
498 ret = -EFAULT;
499 goto out;
500 }
501 ret = len;
502 out:
503 mutex_unlock(&user->lock);
504 return ret;
505 }
506
507 static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
508 {
509 struct devkmsg_user *user = file->private_data;
510 loff_t ret = 0;
511
512 if (!user)
513 return -EBADF;
514 if (offset)
515 return -ESPIPE;
516
517 raw_spin_lock(&logbuf_lock);
518 switch (whence) {
519 case SEEK_SET:
520 /* the first record */
521 user->idx = log_first_idx;
522 user->seq = log_first_seq;
523 break;
524 case SEEK_DATA:
525 /*
526 * The first record after the last SYSLOG_ACTION_CLEAR,
527 * as issued by 'dmesg -c'. Reading /dev/kmsg itself
528 * changes no global state, and does not clear anything.
529 */
530 user->idx = clear_idx;
531 user->seq = clear_seq;
532 break;
533 case SEEK_END:
534 /* after the last record */
535 user->idx = log_next_idx;
536 user->seq = log_next_seq;
537 break;
538 default:
539 ret = -EINVAL;
540 }
541 raw_spin_unlock(&logbuf_lock);
542 return ret;
543 }
544
545 static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
546 {
547 struct devkmsg_user *user = file->private_data;
548 int ret = 0;
549
550 if (!user)
551 return POLLERR|POLLNVAL;
552
553 poll_wait(file, &log_wait, wait);
554
555 raw_spin_lock(&logbuf_lock);
556 if (user->seq < log_next_seq) {
557 /* return error when data has vanished underneath us */
558 if (user->seq < log_first_seq)
559 ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
    else
560 ret = POLLIN|POLLRDNORM;
561 }
562 raw_spin_unlock(&logbuf_lock);
563
564 return ret;
565 }
566
567 static int devkmsg_open(struct inode *inode, struct file *file)
568 {
569 struct devkmsg_user *user;
570 int err;
571
572 /* write-only does not need any file context */
573 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
574 return 0;
575
576 err = security_syslog(SYSLOG_ACTION_READ_ALL);
577 if (err)
578 return err;
579
580 user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
581 if (!user)
582 return -ENOMEM;
583
584 mutex_init(&user->lock);
585
586 raw_spin_lock(&logbuf_lock);
587 user->idx = log_first_idx;
588 user->seq = log_first_seq;
589 raw_spin_unlock(&logbuf_lock);
590
591 file->private_data = user;
592 return 0;
593 }
594
595 static int devkmsg_release(struct inode *inode, struct file *file)
596 {
597 struct devkmsg_user *user = file->private_data;
598
599 if (!user)
600 return 0;
601
602 mutex_destroy(&user->lock);
603 kfree(user);
604 return 0;
605 }
606
607 const struct file_operations kmsg_fops = {
608 .open = devkmsg_open,
609 .read = devkmsg_read,
610 .aio_write = devkmsg_writev,
611 .llseek = devkmsg_llseek,
612 .poll = devkmsg_poll,
613 .release = devkmsg_release,
614 };
615
616 #ifdef CONFIG_KEXEC
617 /*
618 * This appends the listed symbols to /proc/vmcoreinfo
619 *
620 * /proc/vmcoreinfo is used by various utilities, such as crash and makedumpfile, to
621 * obtain access to symbols that are otherwise very difficult to locate. These
622 * symbols are specifically used so that utilities can access and extract the
623 * dmesg log from a vmcore file after a crash.
624 */
625 void log_buf_kexec_setup(void)
626 {
627 VMCOREINFO_SYMBOL(log_buf);
628 VMCOREINFO_SYMBOL(log_buf_len);
629 VMCOREINFO_SYMBOL(log_first_idx);
630 VMCOREINFO_SYMBOL(log_next_idx);
631 }
632 #endif
633
634 /* requested log_buf_len from kernel cmdline */
635 static unsigned long __initdata new_log_buf_len;
636
637 /* save requested log_buf_len since it's too early to process it */
638 static int __init log_buf_len_setup(char *str)
639 {
640 unsigned size = memparse(str, &str);
641
642 if (size)
643 size = roundup_pow_of_two(size);
644 if (size > log_buf_len)
645 new_log_buf_len = size;
646
647 return 0;
648 }
649 early_param("log_buf_len", log_buf_len_setup);
650
651 void __init setup_log_buf(int early)
652 {
653 unsigned long flags;
654 char *new_log_buf;
655 int free;
656
657 if (!new_log_buf_len)
658 return;
659
660 if (early) {
661 unsigned long mem;
662
663 mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
664 if (!mem)
665 return;
666 new_log_buf = __va(mem);
667 } else {
668 new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
669 }
670
671 if (unlikely(!new_log_buf)) {
672 pr_err("log_buf_len: %ld bytes not available\n",
673 new_log_buf_len);
674 return;
675 }
676
677 raw_spin_lock_irqsave(&logbuf_lock, flags);
678 log_buf_len = new_log_buf_len;
679 log_buf = new_log_buf;
680 new_log_buf_len = 0;
681 free = __LOG_BUF_LEN - log_next_idx;
682 memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
683 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
684
685 pr_info("log_buf_len: %d\n", log_buf_len);
686 pr_info("early log buf free: %d(%d%%)\n",
687 free, (free * 100) / __LOG_BUF_LEN);
688 }
689
690 #ifdef CONFIG_BOOT_PRINTK_DELAY
691
692 static int boot_delay; /* msecs delay after each printk during bootup */
693 static unsigned long long loops_per_msec; /* based on boot_delay */
694
695 static int __init boot_delay_setup(char *str)
696 {
697 unsigned long lpj;
698
699 lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
700 loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
701
702 get_option(&str, &boot_delay);
703 if (boot_delay > 10 * 1000)
704 boot_delay = 0;
705
706 pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
707 "HZ: %d, loops_per_msec: %llu\n",
708 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
709 return 1;
710 }
711 __setup("boot_delay=", boot_delay_setup);
712
713 static void boot_delay_msec(void)
714 {
715 unsigned long long k;
716 unsigned long timeout;
717
718 if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
719 return;
720
721 k = (unsigned long long)loops_per_msec * boot_delay;
722
723 timeout = jiffies + msecs_to_jiffies(boot_delay);
724 while (k) {
725 k--;
726 cpu_relax();
727 /*
728 * use (volatile) jiffies to prevent
729 * compiler reduction; loop termination via jiffies
730 * is secondary and may or may not happen.
731 */
732 if (time_after(jiffies, timeout))
733 break;
734 touch_nmi_watchdog();
735 }
736 }
737 #else
738 static inline void boot_delay_msec(void)
739 {
740 }
741 #endif
742
743 #ifdef CONFIG_SECURITY_DMESG_RESTRICT
744 int dmesg_restrict = 1;
745 #else
746 int dmesg_restrict;
747 #endif
748
749 static int syslog_action_restricted(int type)
750 {
751 if (dmesg_restrict)
752 return 1;
753 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
754 return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
755 }
756
757 static int check_syslog_permissions(int type, bool from_file)
758 {
759 /*
760 * If this is from /proc/kmsg and we've already opened it, then we've
761 * already done the capabilities checks at open time.
762 */
763 if (from_file && type != SYSLOG_ACTION_OPEN)
764 return 0;
765
766 if (syslog_action_restricted(type)) {
767 if (capable(CAP_SYSLOG))
768 return 0;
769 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
770 if (capable(CAP_SYS_ADMIN)) {
771 printk_once(KERN_WARNING "%s (%d): "
772 "Attempt to access syslog with CAP_SYS_ADMIN "
773 "but no CAP_SYSLOG (deprecated).\n",
774 current->comm, task_pid_nr(current));
775 return 0;
776 }
777 return -EPERM;
778 }
779 return 0;
780 }
781
782 #if defined(CONFIG_PRINTK_TIME)
783 static bool printk_time = 1;
784 #else
785 static bool printk_time;
786 #endif
787 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
788
789 static int syslog_print_line(u32 idx, char *text, size_t size)
790 {
791 struct log *msg;
792 size_t len;
793
794 msg = log_from_idx(idx);
795 if (!text) {
796 /* calculate length only */
797 len = 3;
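		/* "<%u>" prefix: '<' + one level digit + '>'; extra digits added below */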
798
799 if (msg->level > 9)
800 len++;
801 if (msg->level > 99)
802 len++;
803
804 if (printk_time)
805 len += 15;
806
807 len += msg->text_len;
808 len++;
809 return len;
810 }
811
812 len = sprintf(text, "<%u>", msg->level);
813
814 if (printk_time) {
815 unsigned long long t = msg->ts_nsec;
816 unsigned long rem_ns = do_div(t, 1000000000);
817
818 len += sprintf(text + len, "[%5lu.%06lu] ",
819 (unsigned long) t, rem_ns / 1000);
820 }
821
822 if (len + msg->text_len > size)
823 return -EINVAL;
824 memcpy(text + len, log_text(msg), msg->text_len);
825 len += msg->text_len;
826 text[len++] = '\n';
827 return len;
828 }
829
830 static int syslog_print(char __user *buf, int size)
831 {
832 char *text;
833 int len;
834
835 text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
836 if (!text)
837 return -ENOMEM;
838
839 raw_spin_lock_irq(&logbuf_lock);
840 if (syslog_seq < log_first_seq) {
841 /* messages are gone, move to first one */
842 syslog_seq = log_first_seq;
843 syslog_idx = log_first_idx;
844 }
845 len = syslog_print_line(syslog_idx, text, LOG_LINE_MAX);
846 syslog_idx = log_next(syslog_idx);
847 syslog_seq++;
848 raw_spin_unlock_irq(&logbuf_lock);
849
850 if (len > 0 && copy_to_user(buf, text, len))
851 len = -EFAULT;
852
853 kfree(text);
854 return len;
855 }
856
857 static int syslog_print_all(char __user *buf, int size, bool clear)
858 {
859 char *text;
860 int len = 0;
861
862 text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
863 if (!text)
864 return -ENOMEM;
865
866 raw_spin_lock_irq(&logbuf_lock);
867 if (buf) {
868 u64 next_seq;
869 u64 seq;
870 u32 idx;
871
872 if (clear_seq < log_first_seq) {
873 /* messages are gone, move to first available one */
874 clear_seq = log_first_seq;
875 clear_idx = log_first_idx;
876 }
877
878 /*
879 * Find first record that fits, including all following records,
880 * into the user-provided buffer for this dump.
881 */
882 seq = clear_seq;
883 idx = clear_idx;
884 while (seq < log_next_seq) {
885 len += syslog_print_line(idx, NULL, 0);
886 idx = log_next(idx);
887 seq++;
888 }
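		/* drop records from the start until the rest fits into size */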
889 seq = clear_seq;
890 idx = clear_idx;
891 while (len > size && seq < log_next_seq) {
892 len -= syslog_print_line(idx, NULL, 0);
893 idx = log_next(idx);
894 seq++;
895 }
896
897 /* last message in this dump */
898 next_seq = log_next_seq;
899
900 len = 0;
901 while (len >= 0 && seq < next_seq) {
902 int textlen;
903
904 textlen = syslog_print_line(idx, text, LOG_LINE_MAX);
905 if (textlen < 0) {
906 len = textlen;
907 break;
908 }
909 idx = log_next(idx);
910 seq++;
911
912 raw_spin_unlock_irq(&logbuf_lock);
913 if (copy_to_user(buf + len, text, textlen))
914 len = -EFAULT;
915 else
916 len += textlen;
917 raw_spin_lock_irq(&logbuf_lock);
918
919 if (seq < log_first_seq) {
920 /* messages are gone, move to next one */
921 seq = log_first_seq;
922 idx = log_first_idx;
923 }
924 }
925 }
926
927 if (clear) {
928 clear_seq = log_next_seq;
929 clear_idx = log_next_idx;
930 }
931 raw_spin_unlock_irq(&logbuf_lock);
932
933 kfree(text);
934 return len;
935 }
936
937 int do_syslog(int type, char __user *buf, int len, bool from_file)
938 {
939 bool clear = false;
940 static int saved_console_loglevel = -1;
941 int error;
942
943 error = check_syslog_permissions(type, from_file);
944 if (error)
945 goto out;
946
947 error = security_syslog(type);
948 if (error)
949 return error;
950
951 switch (type) {
952 case SYSLOG_ACTION_CLOSE: /* Close log */
953 break;
954 case SYSLOG_ACTION_OPEN: /* Open log */
955 break;
956 case SYSLOG_ACTION_READ: /* Read from log */
957 error = -EINVAL;
958 if (!buf || len < 0)
959 goto out;
960 error = 0;
961 if (!len)
962 goto out;
963 if (!access_ok(VERIFY_WRITE, buf, len)) {
964 error = -EFAULT;
965 goto out;
966 }
967 error = wait_event_interruptible(log_wait,
968 syslog_seq != log_next_seq);
969 if (error)
970 goto out;
971 error = syslog_print(buf, len);
972 break;
973 /* Read/clear last kernel messages */
974 case SYSLOG_ACTION_READ_CLEAR:
975 clear = true;
976 /* FALL THRU */
977 /* Read last kernel messages */
978 case SYSLOG_ACTION_READ_ALL:
979 error = -EINVAL;
980 if (!buf || len < 0)
981 goto out;
982 error = 0;
983 if (!len)
984 goto out;
985 if (!access_ok(VERIFY_WRITE, buf, len)) {
986 error = -EFAULT;
987 goto out;
988 }
989 error = syslog_print_all(buf, len, clear);
990 break;
991 /* Clear ring buffer */
992 case SYSLOG_ACTION_CLEAR:
993 syslog_print_all(NULL, 0, true);
    break;
994 /* Disable logging to console */
995 case SYSLOG_ACTION_CONSOLE_OFF:
996 if (saved_console_loglevel == -1)
997 saved_console_loglevel = console_loglevel;
998 console_loglevel = minimum_console_loglevel;
999 break;
1000 /* Enable logging to console */
1001 case SYSLOG_ACTION_CONSOLE_ON:
1002 if (saved_console_loglevel != -1) {
1003 console_loglevel = saved_console_loglevel;
1004 saved_console_loglevel = -1;
1005 }
1006 break;
1007 /* Set level of messages printed to console */
1008 case SYSLOG_ACTION_CONSOLE_LEVEL:
1009 error = -EINVAL;
1010 if (len < 1 || len > 8)
1011 goto out;
1012 if (len < minimum_console_loglevel)
1013 len = minimum_console_loglevel;
1014 console_loglevel = len;
1015 /* Implicitly re-enable logging to console */
1016 saved_console_loglevel = -1;
1017 error = 0;
1018 break;
1019 /* Number of chars in the log buffer */
1020 case SYSLOG_ACTION_SIZE_UNREAD:
1021 raw_spin_lock_irq(&logbuf_lock);
1022 if (syslog_seq < log_first_seq) {
1023 /* messages are gone, move to first one */
1024 syslog_seq = log_first_seq;
1025 syslog_idx = log_first_idx;
1026 }
1027 if (from_file) {
1028 /*
1029 * Short-cut for poll("/proc/kmsg") which simply checks
1030 * for pending data, not the size; return the count of
1031 * records, not the length.
1032 */
1033 error = log_next_idx - syslog_idx;
1034 } else {
1035 u64 seq;
1036 u32 idx;
1037
1038 error = 0;
1039 seq = syslog_seq;
1040 idx = syslog_idx;
1041 while (seq < log_next_seq) {
1042 error += syslog_print_line(idx, NULL, 0);
1043 idx = log_next(idx);
1044 seq++;
1045 }
1046 }
1047 raw_spin_unlock_irq(&logbuf_lock);
1048 break;
1049 /* Size of the log buffer */
1050 case SYSLOG_ACTION_SIZE_BUFFER:
1051 error = log_buf_len;
1052 break;
1053 default:
1054 error = -EINVAL;
1055 break;
1056 }
1057 out:
1058 return error;
1059 }
1060
1061 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1062 {
1063 return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
1064 }
1065
1066 #ifdef CONFIG_KGDB_KDB
1067 /* kdb dmesg command needs access to the syslog buffer. do_syslog()
1068 * uses locks so it cannot be used during debugging. Just tell kdb
1069 * where the start and end of the physical and logical logs are. This
1070 * is equivalent to do_syslog(3).
1071 */
1072 void kdb_syslog_data(char *syslog_data[4])
1073 {
1074 syslog_data[0] = log_buf;
1075 syslog_data[1] = log_buf + log_buf_len;
1076 syslog_data[2] = log_buf + log_first_idx;
1077 syslog_data[3] = log_buf + log_next_idx;
1078 }
1079 #endif /* CONFIG_KGDB_KDB */
1080
1081 static bool __read_mostly ignore_loglevel;
1082
1083 static int __init ignore_loglevel_setup(char *str)
1084 {
1085 ignore_loglevel = 1;
1086 printk(KERN_INFO "debug: ignoring loglevel setting.\n");
1087
1088 return 0;
1089 }
1090
1091 early_param("ignore_loglevel", ignore_loglevel_setup);
1092 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1093 MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to "
1094 "print all kernel messages to the console.");
1095
1096 /*
1097 * Call the console drivers, asking them to write out the
1098 * given text of 'len' bytes at the given loglevel.
1099 * The console_lock must be held.
1100 */
1101 static void call_console_drivers(int level, const char *text, size_t len)
1102 {
1103 struct console *con;
1104
1105 trace_console(text, 0, len, len);
1106
1107 if (level >= console_loglevel && !ignore_loglevel)
1108 return;
1109 if (!console_drivers)
1110 return;
1111
1112 for_each_console(con) {
1113 if (exclusive_console && con != exclusive_console)
1114 continue;
1115 if (!(con->flags & CON_ENABLED))
1116 continue;
1117 if (!con->write)
1118 continue;
1119 if (!cpu_online(smp_processor_id()) &&
1120 !(con->flags & CON_ANYTIME))
1121 continue;
1122 con->write(con, text, len);
1123 }
1124 }
1125
1126 /*
1127 * Zap console related locks when oopsing. Only zap at most once
1128 * every 30 seconds, to leave time for slow consoles to print a
1129 * full oops.
1130 */
1131 static void zap_locks(void)
1132 {
1133 static unsigned long oops_timestamp;
1134
1135 if (time_after_eq(jiffies, oops_timestamp) &&
1136 !time_after(jiffies, oops_timestamp + 30 * HZ))
1137 return;
1138
1139 oops_timestamp = jiffies;
1140
1141 debug_locks_off();
1142 /* If a crash is occurring, make sure we can't deadlock */
1143 raw_spin_lock_init(&logbuf_lock);
1144 /* And make sure that we print immediately */
1145 sema_init(&console_sem, 1);
1146 }
1147
1148 /* Check if we have any console registered that can be called early in boot. */
1149 static int have_callable_console(void)
1150 {
1151 struct console *con;
1152
1153 for_each_console(con)
1154 if (con->flags & CON_ANYTIME)
1155 return 1;
1156
1157 return 0;
1158 }
1159
1160 /*
1161 * Can we actually use the console at this time on this cpu?
1162 *
1163 * Console drivers may assume that per-cpu resources have
1164 * been allocated. So unless they're explicitly marked as
1165 * being able to cope (CON_ANYTIME) don't call them until
1166 * this CPU is officially up.
1167 */
1168 static inline int can_use_console(unsigned int cpu)
1169 {
1170 return cpu_online(cpu) || have_callable_console();
1171 }
1172
1173 /*
1174 * Try to get console ownership to actually show the kernel
1175 * messages from a 'printk'. Return true (and with the
1176 * console_lock held, and 'console_locked' set) if it
1177 * is successful, false otherwise.
1178 *
1179 * This gets called with the 'logbuf_lock' spinlock held and
1180 * interrupts disabled. It should return with 'logbuf_lock'
1181 * released but interrupts still disabled.
1182 */
1183 static int console_trylock_for_printk(unsigned int cpu)
1184 __releases(&logbuf_lock)
1185 {
1186 int retval = 0, wake = 0;
1187
1188 if (console_trylock()) {
1189 retval = 1;
1190
1191 /*
1192 * If we can't use the console, we need to release
1193 * the console semaphore by hand to avoid flushing
1194 * the buffer. We need to hold the console semaphore
1195 * in order to do this test safely.
1196 */
1197 if (!can_use_console(cpu)) {
1198 console_locked = 0;
1199 wake = 1;
1200 retval = 0;
1201 }
1202 }
1203 logbuf_cpu = UINT_MAX;
1204 if (wake)
1205 up(&console_sem);
1206 raw_spin_unlock(&logbuf_lock);
1207 return retval;
1208 }
1209
1210 int printk_delay_msec __read_mostly;
1211
1212 static inline void printk_delay(void)
1213 {
1214 if (unlikely(printk_delay_msec)) {
1215 int m = printk_delay_msec;
1216
1217 while (m--) {
1218 mdelay(1);
1219 touch_nmi_watchdog();
1220 }
1221 }
1222 }
1223
1224 asmlinkage int vprintk_emit(int facility, int level,
1225 const char *dict, size_t dictlen,
1226 const char *fmt, va_list args)
1227 {
1228 static int recursion_bug;
1229 static char buf[LOG_LINE_MAX];
1230 static size_t buflen;
1231 static int buflevel;
1232 static char textbuf[LOG_LINE_MAX];
1233 static struct task_struct *cont;
1234 char *text = textbuf;
1235 size_t textlen;
1236 unsigned long flags;
1237 int this_cpu;
1238 bool newline = false;
1239 bool prefix = false;
1240 int printed_len = 0;
1241
1242 boot_delay_msec();
1243 printk_delay();
1244
1245 /* This stops the holder of console_sem just where we want him */
1246 local_irq_save(flags);
1247 this_cpu = smp_processor_id();
1248
1249 /*
1250 * Ouch, printk recursed into itself!
1251 */
1252 if (unlikely(logbuf_cpu == this_cpu)) {
1253 /*
1254 * If a crash is occurring during printk() on this CPU,
1255 * then try to get the crash message out but make sure
1256 * we can't deadlock. Otherwise just return to avoid the
1257 * recursion - but flag it so that it can be printed at
1258 * the next appropriate moment:
1259 */
1260 if (!oops_in_progress && !lockdep_recursing(current)) {
1261 recursion_bug = 1;
1262 goto out_restore_irqs;
1263 }
1264 zap_locks();
1265 }
1266
1267 lockdep_off();
1268 raw_spin_lock(&logbuf_lock);
1269 logbuf_cpu = this_cpu;
1270
1271 if (recursion_bug) {
1272 static const char recursion_msg[] =
1273 "BUG: recent printk recursion!";
1274
1275 recursion_bug = 0;
1276 printed_len += strlen(recursion_msg);
1277 /* emit KERN_CRIT message */
1278 log_store(0, 2, NULL, 0, recursion_msg, printed_len);
1279 }
1280
1281 /*
1282 * The printf needs to come first; we need the syslog
1283 * prefix which might be passed-in as a parameter.
1284 */
1285 textlen = vscnprintf(text, sizeof(textbuf), fmt, args);
1286
1287 /* mark and strip a trailing newline */
1288 if (textlen && text[textlen-1] == '\n') {
1289 textlen--;
1290 newline = true;
1291 }
1292
1293 /* strip syslog prefix and extract log level or flags */
1294 if (text[0] == '<' && text[1] && text[2] == '>') {
1295 switch (text[1]) {
1296 case '0' ... '7':
1297 if (level == -1)
1298 level = text[1] - '0';
1299 case 'd': /* KERN_DEFAULT */
1300 prefix = true;
1301 case 'c': /* KERN_CONT */
1302 text += 3;
1303 textlen -= 3;
1304 }
1305 }
1306
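	/*
	 * A buffered earlier continuation line is flushed as its own record
	 * when this message carries a prefix or a dictionary, or when it was
	 * emitted by a different task - continuation lines of different
	 * threads are never merged into one record.
	 */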
1307 if (buflen && (prefix || dict || cont != current)) {
1308 /* flush existing buffer */
1309 log_store(facility, buflevel, NULL, 0, buf, buflen);
1310 printed_len += buflen;
1311 buflen = 0;
1312 }
1313
1314 if (buflen == 0) {
1315 /* remember level for first message in the buffer */
1316 if (level == -1)
1317 buflevel = default_message_loglevel;
1318 else
1319 buflevel = level;
1320 }
1321
1322 if (buflen || !newline) {
1323 /* append to existing buffer, or buffer until next message */
1324 if (buflen + textlen > sizeof(buf))
1325 textlen = sizeof(buf) - buflen;
1326 memcpy(buf + buflen, text, textlen);
1327 buflen += textlen;
1328 }
1329
1330 if (newline) {
1331 /* end of line; flush buffer */
1332 if (buflen) {
1333 log_store(facility, buflevel,
1334 dict, dictlen, buf, buflen);
1335 printed_len += buflen;
1336 buflen = 0;
1337 } else {
1338 log_store(facility, buflevel,
1339 dict, dictlen, text, textlen);
1340 printed_len += textlen;
1341 }
1342 cont = NULL;
1343 } else {
1344 /* remember thread which filled the buffer */
1345 cont = current;
1346 }
1347
1348 /*
1349 * Try to acquire and then immediately release the console semaphore.
1350 * The release will print out buffers and wake up /dev/kmsg and syslog()
1351 * users.
1352 *
1353 * The console_trylock_for_printk() function will release 'logbuf_lock'
1354 * regardless of whether it actually gets the console semaphore or not.
1355 */
1356 if (console_trylock_for_printk(this_cpu))
1357 console_unlock();
1358
1359 lockdep_on();
1360 out_restore_irqs:
1361 local_irq_restore(flags);
1362
1363 return printed_len;
1364 }
1365 EXPORT_SYMBOL(vprintk_emit);
1366
1367 asmlinkage int vprintk(const char *fmt, va_list args)
1368 {
1369 return vprintk_emit(0, -1, NULL, 0, fmt, args);
1370 }
1371 EXPORT_SYMBOL(vprintk);
1372
1373 asmlinkage int printk_emit(int facility, int level,
1374 const char *dict, size_t dictlen,
1375 const char *fmt, ...)
1376 {
1377 va_list args;
1378 int r;
1379
1380 va_start(args, fmt);
1381 r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
1382 va_end(args);
1383
1384 return r;
1385 }
1386 EXPORT_SYMBOL(printk_emit);
1387
1388 /**
1389 * printk - print a kernel message
1390 * @fmt: format string
1391 *
1392 * This is printk(). It can be called from any context. We want it to work.
1393 *
1394 * We try to grab the console_lock. If we succeed, it's easy - we log the
1395 * output and call the console drivers. If we fail to get the semaphore, we
1396 * place the output into the log buffer and return. The current holder of
1397 * the console_sem will notice the new output in console_unlock(); and will
1398 * send it to the consoles before releasing the lock.
1399 *
1400 * One effect of this deferred printing is that code which calls printk() and
1401 * then changes console_loglevel may break. This is because console_loglevel
1402 * is inspected when the actual printing occurs.
1403 *
1404 * See also:
1405 * printf(3)
1406 *
1407 * See the vsnprintf() documentation for format string extensions over C99.
1408 */
1409 asmlinkage int printk(const char *fmt, ...)
1410 {
1411 va_list args;
1412 int r;
1413
1414 #ifdef CONFIG_KGDB_KDB
1415 if (unlikely(kdb_trap_printk)) {
1416 va_start(args, fmt);
1417 r = vkdb_printf(fmt, args);
1418 va_end(args);
1419 return r;
1420 }
1421 #endif
1422 va_start(args, fmt);
1423 r = vprintk_emit(0, -1, NULL, 0, fmt, args);
1424 va_end(args);
1425
1426 return r;
1427 }
1428 EXPORT_SYMBOL(printk);
1429
1430 #else
1431
1432 #define LOG_LINE_MAX 0
1433 static struct log *log_from_idx(u32 idx) { return NULL; }
1434 static u32 log_next(u32 idx) { return 0; }
1435 static char *log_text(const struct log *msg) { return NULL; }
1436 static void call_console_drivers(int level, const char *text, size_t len) {}
1437
1438 #endif /* CONFIG_PRINTK */
1439
1440 static int __add_preferred_console(char *name, int idx, char *options,
1441 char *brl_options)
1442 {
1443 struct console_cmdline *c;
1444 int i;
1445
1446 /*
1447 * See if this tty is not yet registered, and
1448 * if we have a slot free.
1449 */
1450 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
1451 if (strcmp(console_cmdline[i].name, name) == 0 &&
1452 console_cmdline[i].index == idx) {
1453 if (!brl_options)
1454 selected_console = i;
1455 return 0;
1456 }
1457 if (i == MAX_CMDLINECONSOLES)
1458 return -E2BIG;
1459 if (!brl_options)
1460 selected_console = i;
1461 c = &console_cmdline[i];
1462 strlcpy(c->name, name, sizeof(c->name));
1463 c->options = options;
1464 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
1465 c->brl_options = brl_options;
1466 #endif
1467 c->index = idx;
1468 return 0;
1469 }
1470 /*
1471 * Set up a list of consoles. Called from init/main.c
1472 */
1473 static int __init console_setup(char *str)
1474 {
1475 char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
1476 char *s, *options, *brl_options = NULL;
1477 int idx;
1478
1479 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
1480 if (!memcmp(str, "brl,", 4)) {
1481 brl_options = "";
1482 str += 4;
1483 } else if (!memcmp(str, "brl=", 4)) {
1484 brl_options = str + 4;
1485 str = strchr(brl_options, ',');
1486 if (!str) {
1487 printk(KERN_ERR "need port name after brl=\n");
1488 return 1;
1489 }
1490 *(str++) = 0;
1491 }
1492 #endif
1493
1494 /*
1495 * Decode str into name, index, options.
1496 */
1497 if (str[0] >= '0' && str[0] <= '9') {
1498 strcpy(buf, "ttyS");
1499 strncpy(buf + 4, str, sizeof(buf) - 5);
1500 } else {
1501 strncpy(buf, str, sizeof(buf) - 1);
1502 }
1503 buf[sizeof(buf) - 1] = 0;
1504 if ((options = strchr(str, ',')) != NULL)
1505 *(options++) = 0;
1506 #ifdef __sparc__
1507 if (!strcmp(str, "ttya"))
1508 strcpy(buf, "ttyS0");
1509 if (!strcmp(str, "ttyb"))
1510 strcpy(buf, "ttyS1");
1511 #endif
1512 for (s = buf; *s; s++)
1513 if ((*s >= '0' && *s <= '9') || *s == ',')
1514 break;
1515 idx = simple_strtoul(s, NULL, 10);
1516 *s = 0;
1517
1518 __add_preferred_console(buf, idx, options, brl_options);
1519 console_set_on_cmdline = 1;
1520 return 1;
1521 }
1522 __setup("console=", console_setup);
1523
1524 /**
1525 * add_preferred_console - add a device to the list of preferred consoles.
1526 * @name: device name
1527 * @idx: device index
1528 * @options: options for this console
1529 *
1530 * The last preferred console added will be used for kernel messages
1531 * and stdin/out/err for init. Normally this is used by console_setup
1532 * above to handle user-supplied console arguments; however it can also
1533 * be used by arch-specific code either to override the user or more
1534 * commonly to provide a default console (ie from PROM variables) when
1535 * the user has not supplied one.
1536 */
1537 int add_preferred_console(char *name, int idx, char *options)
1538 {
1539 return __add_preferred_console(name, idx, options, NULL);
1540 }
1541
1542 int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
1543 {
1544 struct console_cmdline *c;
1545 int i;
1546
1547 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
1548 if (strcmp(console_cmdline[i].name, name) == 0 &&
1549 console_cmdline[i].index == idx) {
1550 c = &console_cmdline[i];
1551 strlcpy(c->name, name_new, sizeof(c->name));
1552 c->name[sizeof(c->name) - 1] = 0;
1553 c->options = options;
1554 c->index = idx_new;
1555 return i;
1556 }
1557 /* not found */
1558 return -1;
1559 }
1560
1561 bool console_suspend_enabled = 1;
1562 EXPORT_SYMBOL(console_suspend_enabled);
1563
1564 static int __init console_suspend_disable(char *str)
1565 {
1566 console_suspend_enabled = 0;
1567 return 1;
1568 }
1569 __setup("no_console_suspend", console_suspend_disable);
1570 module_param_named(console_suspend, console_suspend_enabled,
1571 bool, S_IRUGO | S_IWUSR);
1572 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
1573 " and hibernate operations");
1574
1575 /**
1576 * suspend_console - suspend the console subsystem
1577 *
1578 * This disables printk() while we go into suspend states
1579 */
1580 void suspend_console(void)
1581 {
1582 if (!console_suspend_enabled)
1583 return;
1584 printk("Suspending console(s) (use no_console_suspend to debug)\n");
1585 console_lock();
1586 console_suspended = 1;
1587 up(&console_sem);
1588 }
1589
1590 void resume_console(void)
1591 {
1592 if (!console_suspend_enabled)
1593 return;
1594 down(&console_sem);
1595 console_suspended = 0;
1596 console_unlock();
1597 }
1598
1599 /**
1600 * console_cpu_notify - print deferred console messages after CPU hotplug
1601 * @self: notifier struct
1602 * @action: CPU hotplug event
1603 * @hcpu: unused
1604 *
1605 * If printk() is called from a CPU that is not online yet, the messages
1606 * will be spooled but will not show up on the console. This function is
1607 * called when a new CPU comes online (or fails to come up), and ensures
1608 * that any such output gets printed.
1609 */
1610 static int __cpuinit console_cpu_notify(struct notifier_block *self,
1611 unsigned long action, void *hcpu)
1612 {
1613 switch (action) {
1614 case CPU_ONLINE:
1615 case CPU_DEAD:
1616 case CPU_DYING:
1617 case CPU_DOWN_FAILED:
1618 case CPU_UP_CANCELED:
1619 console_lock();
1620 console_unlock();
1621 }
1622 return NOTIFY_OK;
1623 }
1624
1625 /**
1626 * console_lock - lock the console system for exclusive use.
1627 *
1628 * Acquires a lock which guarantees that the caller has
1629 * exclusive access to the console system and the console_drivers list.
1630 *
1631 * Can sleep, returns nothing.
1632 */
1633 void console_lock(void)
1634 {
1635 BUG_ON(in_interrupt());
1636 down(&console_sem);
1637 if (console_suspended)
1638 return;
1639 console_locked = 1;
1640 console_may_schedule = 1;
1641 }
1642 EXPORT_SYMBOL(console_lock);
1643
1644 /**
1645 * console_trylock - try to lock the console system for exclusive use.
1646 *
1647 * Tries to acquire a lock which guarantees that the caller has
1648 * exclusive access to the console system and the console_drivers list.
1649 *
1650 * returns 1 on success, and 0 on failure to acquire the lock.
1651 */
1652 int console_trylock(void)
1653 {
1654 if (down_trylock(&console_sem))
1655 return 0;
1656 if (console_suspended) {
1657 up(&console_sem);
1658 return 0;
1659 }
1660 console_locked = 1;
1661 console_may_schedule = 0;
1662 return 1;
1663 }
1664 EXPORT_SYMBOL(console_trylock);
1665
1666 int is_console_locked(void)
1667 {
1668 return console_locked;
1669 }
1670
1671 /*
1672 * Delayed printk version, for scheduler-internal messages:
1673 */
1674 #define PRINTK_BUF_SIZE 512
1675
1676 #define PRINTK_PENDING_WAKEUP 0x01
1677 #define PRINTK_PENDING_SCHED 0x02
1678
1679 static DEFINE_PER_CPU(int, printk_pending);
1680 static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
1681
1682 void printk_tick(void)
1683 {
1684 if (__this_cpu_read(printk_pending)) {
1685 int pending = __this_cpu_xchg(printk_pending, 0);
1686 if (pending & PRINTK_PENDING_SCHED) {
1687 char *buf = __get_cpu_var(printk_sched_buf);
1688 printk(KERN_WARNING "[sched_delayed] %s", buf);
1689 }
1690 if (pending & PRINTK_PENDING_WAKEUP)
1691 wake_up_interruptible(&log_wait);
1692 }
1693 }
1694
1695 int printk_needs_cpu(int cpu)
1696 {
1697 if (cpu_is_offline(cpu))
1698 printk_tick();
1699 return __this_cpu_read(printk_pending);
1700 }
1701
1702 void wake_up_klogd(void)
1703 {
1704 if (waitqueue_active(&log_wait))
1705 this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
1706 }
1707
1708 /* the next printk record to write to the console */
1709 static u64 console_seq;
1710 static u32 console_idx;
1711
1712 /**
1713 * console_unlock - unlock the console system
1714 *
1715 * Releases the console_lock which the caller holds on the console system
1716 * and the console driver list.
1717 *
1718 * While the console_lock was held, console output may have been buffered
1719 * by printk(). If this is the case, console_unlock() emits
1720 * the output prior to releasing the lock.
1721 *
1722 * If there is output waiting, we wake /dev/kmsg and syslog() users.
1723 *
1724 * console_unlock() may be called from any context.
1725 */
1726 void console_unlock(void)
1727 {
1728 static u64 seen_seq;
1729 unsigned long flags;
1730 bool wake_klogd = false;
1731 bool retry;
1732
1733 if (console_suspended) {
1734 up(&console_sem);
1735 return;
1736 }
1737
1738 console_may_schedule = 0;
1739
1740 again:
1741 for (;;) {
1742 struct log *msg;
1743 static char text[LOG_LINE_MAX];
1744 size_t len;
1745 int level;
1746
1747 raw_spin_lock_irqsave(&logbuf_lock, flags);
1748 if (seen_seq != log_next_seq) {
1749 wake_klogd = true;
1750 seen_seq = log_next_seq;
1751 }
1752
1753 if (console_seq < log_first_seq) {
1754 /* messages are gone, move to first one */
1755 console_seq = log_first_seq;
1756 console_idx = log_first_idx;
1757 }
1758
1759 if (console_seq == log_next_seq)
1760 break;
1761
1762 msg = log_from_idx(console_idx);
1763 level = msg->level & 7;
1764 len = msg->text_len;
1765 if (len+1 >= sizeof(text))
1766 len = sizeof(text)-1;
1767 memcpy(text, log_text(msg), len);
1768 text[len++] = '\n';
1769
1770 console_idx = log_next(console_idx);
1771 console_seq++;
1772 raw_spin_unlock(&logbuf_lock);
1773
1774 stop_critical_timings(); /* don't trace print latency */
1775 call_console_drivers(level, text, len);
1776 start_critical_timings();
1777 local_irq_restore(flags);
1778 }
1779 console_locked = 0;
1780
1781 /* Release the exclusive_console once it is used */
1782 if (unlikely(exclusive_console))
1783 exclusive_console = NULL;
1784
1785 raw_spin_unlock(&logbuf_lock);
1786
1787 up(&console_sem);
1788
1789 /*
1790 * Someone could have filled up the buffer again, so re-check if there's
1791 * something to flush. In case we cannot trylock the console_sem again,
1792 * there's a new owner and the console_unlock() from them will do the
1793 * flush, no worries.
1794 */
1795 raw_spin_lock(&logbuf_lock);
1796 retry = console_seq != log_next_seq;
1797 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
1798
1799 if (retry && console_trylock())
1800 goto again;
1801
1802 if (wake_klogd)
1803 wake_up_klogd();
1804 }
1805 EXPORT_SYMBOL(console_unlock);
1806
1807 /**
1808 * console_conditional_schedule - yield the CPU if required
1809 *
1810 * If the console code is currently allowed to sleep, and
1811 * if this CPU should yield the CPU to another task, do
1812 * so here.
1813 *
1814 * Must be called while holding console_lock().
1815 */
1816 void __sched console_conditional_schedule(void)
1817 {
1818 if (console_may_schedule)
1819 cond_resched();
1820 }
1821 EXPORT_SYMBOL(console_conditional_schedule);
1822
1823 void console_unblank(void)
1824 {
1825 struct console *c;
1826
1827 /*
1828 * console_unblank can no longer be called in interrupt context unless
1829 * oops_in_progress is set to 1..
1830 */
1831 if (oops_in_progress) {
1832 if (down_trylock(&console_sem) != 0)
1833 return;
1834 } else
1835 console_lock();
1836
1837 console_locked = 1;
1838 console_may_schedule = 0;
1839 for_each_console(c)
1840 if ((c->flags & CON_ENABLED) && c->unblank)
1841 c->unblank();
1842 console_unlock();
1843 }
1844
1845 /*
1846 * Return the console tty driver structure and its associated index
1847 */
1848 struct tty_driver *console_device(int *index)
1849 {
1850 struct console *c;
1851 struct tty_driver *driver = NULL;
1852
1853 console_lock();
1854 for_each_console(c) {
1855 if (!c->device)
1856 continue;
1857 driver = c->device(c, index);
1858 if (driver)
1859 break;
1860 }
1861 console_unlock();
1862 return driver;
1863 }
1864
1865 /*
1866 * Prevent further output on the passed console device so that (for example)
1867 * serial drivers can disable console output before suspending a port, and can
1868 * re-enable output afterwards.
1869 */
1870 void console_stop(struct console *console)
1871 {
1872 console_lock();
1873 console->flags &= ~CON_ENABLED;
1874 console_unlock();
1875 }
1876 EXPORT_SYMBOL(console_stop);
1877
1878 void console_start(struct console *console)
1879 {
1880 console_lock();
1881 console->flags |= CON_ENABLED;
1882 console_unlock();
1883 }
1884 EXPORT_SYMBOL(console_start);
1885
1886 static int __read_mostly keep_bootcon;
1887
1888 static int __init keep_bootcon_setup(char *str)
1889 {
1890 keep_bootcon = 1;
1891 printk(KERN_INFO "debug: skip boot console de-registration.\n");
1892
1893 return 0;
1894 }
1895
1896 early_param("keep_bootcon", keep_bootcon_setup);
1897
1898 /*
1899 * The console driver calls this routine during kernel initialization
1900 * to register the console printing procedure with printk() and to
1901 * print any messages that were printed by the kernel before the
1902 * console driver was initialized.
1903 *
1904 * This can happen pretty early during the boot process (because of
1905 * early_printk), sometimes before setup_arch() completes. Be careful
1906 * which kernel features are used - they may not be initialised yet.
1907 *
1908 * There are two types of consoles - bootconsoles (early_printk) and
1909 * "real" consoles (everything which is not a bootconsole) which are
1910 * handled differently.
1911 * - Any number of bootconsoles can be registered at any time.
1912 * - As soon as a "real" console is registered, all bootconsoles
1913 * will be unregistered automatically.
1914 * - Once a "real" console is registered, any attempt to register a
1915 * bootconsole will be rejected.
1916 */
1917 void register_console(struct console *newcon)
1918 {
1919 int i;
1920 unsigned long flags;
1921 struct console *bcon = NULL;
1922
1923 /*
1924 * before we register a new CON_BOOT console, make sure we don't
1925 * already have a valid console
1926 */
1927 if (console_drivers && newcon->flags & CON_BOOT) {
1928 /* find the last or real console */
1929 for_each_console(bcon) {
1930 if (!(bcon->flags & CON_BOOT)) {
1931 printk(KERN_INFO "Too late to register bootconsole %s%d\n",
1932 newcon->name, newcon->index);
1933 return;
1934 }
1935 }
1936 }
1937
1938 if (console_drivers && console_drivers->flags & CON_BOOT)
1939 bcon = console_drivers;
1940
1941 if (preferred_console < 0 || bcon || !console_drivers)
1942 preferred_console = selected_console;
1943
1944 if (newcon->early_setup)
1945 newcon->early_setup();
1946
1947 /*
1948 * See if we want to use this console driver. If we
1949 * didn't select a console we take the first one
1950 * that registers here.
1951 */
1952 if (preferred_console < 0) {
1953 if (newcon->index < 0)
1954 newcon->index = 0;
1955 if (newcon->setup == NULL ||
1956 newcon->setup(newcon, NULL) == 0) {
1957 newcon->flags |= CON_ENABLED;
1958 if (newcon->device) {
1959 newcon->flags |= CON_CONSDEV;
1960 preferred_console = 0;
1961 }
1962 }
1963 }
1964
1965 /*
1966 * See if this console matches one we selected on
1967 * the command line.
1968 */
1969 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
1970 i++) {
1971 if (strcmp(console_cmdline[i].name, newcon->name) != 0)
1972 continue;
1973 if (newcon->index >= 0 &&
1974 newcon->index != console_cmdline[i].index)
1975 continue;
1976 if (newcon->index < 0)
1977 newcon->index = console_cmdline[i].index;
1978 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
1979 if (console_cmdline[i].brl_options) {
1980 newcon->flags |= CON_BRL;
1981 braille_register_console(newcon,
1982 console_cmdline[i].index,
1983 console_cmdline[i].options,
1984 console_cmdline[i].brl_options);
1985 return;
1986 }
1987 #endif
1988 if (newcon->setup &&
1989 newcon->setup(newcon, console_cmdline[i].options) != 0)
1990 break;
1991 newcon->flags |= CON_ENABLED;
1992 newcon->index = console_cmdline[i].index;
1993 if (i == selected_console) {
1994 newcon->flags |= CON_CONSDEV;
1995 preferred_console = selected_console;
1996 }
1997 break;
1998 }
1999
2000 if (!(newcon->flags & CON_ENABLED))
2001 return;
2002
2003 /*
2004 * If we have a bootconsole, and are switching to a real console,
2005 * don't print everything out again, since when the boot console and
2006 * the real console are the same physical device, it's annoying to
2007 * see the beginning boot messages twice.
2008 */
2009 if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
2010 newcon->flags &= ~CON_PRINTBUFFER;
2011
2012 /*
2013 * Put this console in the list - keep the
2014 * preferred driver at the head of the list.
2015 */
2016 console_lock();
2017 if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
2018 newcon->next = console_drivers;
2019 console_drivers = newcon;
2020 if (newcon->next)
2021 newcon->next->flags &= ~CON_CONSDEV;
2022 } else {
2023 newcon->next = console_drivers->next;
2024 console_drivers->next = newcon;
2025 }
2026 if (newcon->flags & CON_PRINTBUFFER) {
2027 /*
2028 * console_unlock() will print out the buffered messages
2029 * for us.
2030 */
2031 raw_spin_lock_irqsave(&logbuf_lock, flags);
2032 console_seq = syslog_seq;
2033 console_idx = syslog_idx;
2034 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2035 /*
2036 * We're about to replay the log buffer. Only do this to the
2037 * just-registered console to avoid excessive message spam to
2038 * the already-registered consoles.
2039 */
2040 exclusive_console = newcon;
2041 }
2042 console_unlock();
2043 console_sysfs_notify();
2044
2045 /*
2046 * By unregistering the bootconsoles after we enable the real console,
2047 * we get the "console xxx enabled" message on all of the consoles
2048 * (boot consoles, real consoles, etc).  This ensures that end users
2049 * know there might be something in the kernel's log buffer that went
2050 * to the bootconsole and that they will not see on the real console.
2051 */
2052 if (bcon &&
2053 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
2054 !keep_bootcon) {
2055 /* we need to iterate through twice to make sure we print
2056 * everything out before we unregister the console(s)
2057 */
2058 printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n",
2059 newcon->name, newcon->index);
2060 for_each_console(bcon)
2061 if (bcon->flags & CON_BOOT)
2062 unregister_console(bcon);
2063 } else {
2064 printk(KERN_INFO "%sconsole [%s%d] enabled\n",
2065 (newcon->flags & CON_BOOT) ? "boot" : "",
2066 newcon->name, newcon->index);
2067 }
2068 }
2069 EXPORT_SYMBOL(register_console);
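/*
 * Example: registering a minimal console driver.  This is only a
 * sketch with hypothetical names (example_console_write and the
 * example_hw_write() it calls are stand-ins, not code in this file).
 * index = -1 lets "console=" command line matching pick the instance,
 * and CON_PRINTBUFFER asks for the log buffer to be replayed on this
 * console once it is enabled:
 *
 *	static void example_console_write(struct console *con,
 *					  const char *s, unsigned int count)
 *	{
 *		example_hw_write(s, count);
 *	}
 *
 *	static struct console example_console = {
 *		.name	= "excon",
 *		.write	= example_console_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&example_console);
 */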
2070
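/**
 * unregister_console - remove a console from the console_drivers list
 * @console: console to unregister
 *
 * Returns 0 when the console was found and unlinked, non-zero
 * otherwise.  Braille consoles are handed off to
 * braille_unregister_console() instead.
 */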
2071 int unregister_console(struct console *console)
2072 {
2073 struct console *a, *b;
2074 int res = 1;
2075
2076 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
2077 if (console->flags & CON_BRL)
2078 return braille_unregister_console(console);
2079 #endif
2080
2081 console_lock();
2082 if (console_drivers == console) {
2083 console_drivers = console->next;
2084 res = 0;
2085 } else if (console_drivers) {
2086 for (a = console_drivers->next, b = console_drivers;
2087 a; b = a, a = b->next) {
2088 if (a == console) {
2089 b->next = a->next;
2090 res = 0;
2091 break;
2092 }
2093 }
2094 }
2095
2096 /*
2097 * If this isn't the last console and it has CON_CONSDEV set, we
2098 * need to set it on the next preferred console.
2099 */
2100 if (console_drivers != NULL && console->flags & CON_CONSDEV)
2101 console_drivers->flags |= CON_CONSDEV;
2102
2103 console_unlock();
2104 console_sysfs_notify();
2105 return res;
2106 }
2107 EXPORT_SYMBOL(unregister_console);
2108
2109 static int __init printk_late_init(void)
2110 {
2111 struct console *con;
2112
2113 for_each_console(con) {
2114 if (!keep_bootcon && con->flags & CON_BOOT) {
2115 printk(KERN_INFO "turn off boot console %s%d\n",
2116 con->name, con->index);
2117 unregister_console(con);
2118 }
2119 }
2120 hotcpu_notifier(console_cpu_notify, 0);
2121 return 0;
2122 }
2123 late_initcall(printk_late_init);
2124
2125 #if defined CONFIG_PRINTK
2126
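/*
 * printk_sched - printk() for use inside scheduler code
 *
 * Calling printk() directly from the scheduler can recurse or deadlock
 * on the locks printk() itself needs, so the message is only formatted
 * into a per-CPU buffer here and PRINTK_PENDING_SCHED is set; the
 * buffered text is emitted later from printk's deferred/tick path.
 */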
2127 int printk_sched(const char *fmt, ...)
2128 {
2129 unsigned long flags;
2130 va_list args;
2131 char *buf;
2132 int r;
2133
2134 local_irq_save(flags);
2135 buf = __get_cpu_var(printk_sched_buf);
2136
2137 va_start(args, fmt);
2138 r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
2139 va_end(args);
2140
2141 __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
2142 local_irq_restore(flags);
2143
2144 return r;
2145 }
2146
2147 /*
2148 * printk rate limiting, lifted from the networking subsystem.
2149 *
2150 * This enforces a rate limit: not more than 10 kernel messages
2151 * every 5s, so that message floods cannot be used for denial of service.
2152 */
2153 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
2154
2155 int __printk_ratelimit(const char *func)
2156 {
2157 return ___ratelimit(&printk_ratelimit_state, func);
2158 }
2159 EXPORT_SYMBOL(__printk_ratelimit);
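/*
 * Callers normally use the printk_ratelimit() wrapper from
 * <linux/printk.h>, which passes the caller's function name in.
 * A typical (sketched) use:
 *
 *	if (printk_ratelimit())
 *		printk(KERN_NOTICE "dropping packet from %pI4\n", &saddr);
 *
 * where saddr is whatever the hypothetical caller wants to report.
 */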
2160
2161 /**
2162 * printk_timed_ratelimit - caller-controlled printk ratelimiting
2163 * @caller_jiffies: pointer to caller's state
2164 * @interval_msecs: minimum interval between prints
2165 *
2166 * printk_timed_ratelimit() returns true if more than @interval_msecs
2167 * milliseconds have elapsed since the last time printk_timed_ratelimit()
2168 * returned true.
2169 */
2170 bool printk_timed_ratelimit(unsigned long *caller_jiffies,
2171 unsigned int interval_msecs)
2172 {
2173 if (*caller_jiffies == 0
2174 || !time_in_range(jiffies, *caller_jiffies,
2175 *caller_jiffies
2176 + msecs_to_jiffies(interval_msecs))) {
2177 *caller_jiffies = jiffies;
2178 return true;
2179 }
2180 return false;
2181 }
2182 EXPORT_SYMBOL(printk_timed_ratelimit);
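/*
 * A sketched use, with a caller-owned timestamp (last_warned is a
 * hypothetical name):
 *
 *	static unsigned long last_warned;
 *
 *	if (printk_timed_ratelimit(&last_warned, 60 * 1000))
 *		printk(KERN_WARNING "device still not responding\n");
 *
 * prints the warning at most once a minute, no matter how often the
 * condition is evaluated.
 */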
2183
2184 static DEFINE_SPINLOCK(dump_list_lock);
2185 static LIST_HEAD(dump_list);
2186
2187 /**
2188 * kmsg_dump_register - register a kernel log dumper.
2189 * @dumper: pointer to the kmsg_dumper structure
2190 *
2191 * Adds a kernel log dumper to the system. The dump callback in the
2192 * structure must be set; it will be called when the kernel oopses or
2193 * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
2194 */
2195 int kmsg_dump_register(struct kmsg_dumper *dumper)
2196 {
2197 unsigned long flags;
2198 int err = -EBUSY;
2199
2200 /* The dump callback needs to be set */
2201 if (!dumper->dump)
2202 return -EINVAL;
2203
2204 spin_lock_irqsave(&dump_list_lock, flags);
2205 /* Don't allow registering multiple times */
2206 if (!dumper->registered) {
2207 dumper->registered = 1;
2208 list_add_tail_rcu(&dumper->list, &dump_list);
2209 err = 0;
2210 }
2211 spin_unlock_irqrestore(&dump_list_lock, flags);
2212
2213 return err;
2214 }
2215 EXPORT_SYMBOL_GPL(kmsg_dump_register);
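/*
 * Example dumper (a sketch; example_dump and the example_store_write()
 * persistence hook it calls are hypothetical).  The two segment
 * pointer/length pairs cover the log buffer, which may be handed over
 * in two pieces because the ring buffer can wrap:
 *
 *	static void example_dump(struct kmsg_dumper *dumper,
 *				 enum kmsg_dump_reason reason,
 *				 const char *s1, unsigned long l1,
 *				 const char *s2, unsigned long l2)
 *	{
 *		example_store_write(s1, l1);
 *		example_store_write(s2, l2);
 *	}
 *
 *	static struct kmsg_dumper example_dumper = {
 *		.dump = example_dump,
 *	};
 *
 *	kmsg_dump_register(&example_dumper);
 *	...
 *	kmsg_dump_unregister(&example_dumper);
 */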
2216
2217 /**
2218 * kmsg_dump_unregister - unregister a kmsg dumper.
2219 * @dumper: pointer to the kmsg_dumper structure
2220 *
2221 * Removes a dump device from the system. Returns zero on success and
2222 * %-EINVAL otherwise.
2223 */
2224 int kmsg_dump_unregister(struct kmsg_dumper *dumper)
2225 {
2226 unsigned long flags;
2227 int err = -EINVAL;
2228
2229 spin_lock_irqsave(&dump_list_lock, flags);
2230 if (dumper->registered) {
2231 dumper->registered = 0;
2232 list_del_rcu(&dumper->list);
2233 err = 0;
2234 }
2235 spin_unlock_irqrestore(&dump_list_lock, flags);
2236 synchronize_rcu();
2237
2238 return err;
2239 }
2240 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
2241
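/*
 * kmsg_dump() below only runs the dumpers for oops and panic unless
 * always_kmsg_dump is set (it is also exposed as a module parameter).
 */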
2242 static bool always_kmsg_dump;
2243 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
2244
2245 /**
2246 * kmsg_dump - dump kernel log to kernel message dumpers.
2247 * @reason: the reason (oops, panic etc) for dumping
2248 *
2249 * Iterate through each of the dump devices and call the oops/panic
2250 * callbacks with the log buffer.
2251 */
2252 void kmsg_dump(enum kmsg_dump_reason reason)
2253 {
2254 u64 idx;
2255 struct kmsg_dumper *dumper;
2256 const char *s1, *s2;
2257 unsigned long l1, l2;
2258 unsigned long flags;
2259
2260 if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
2261 return;
2262
2263 /* Theoretically, the log could move on after we do this, but
2264 * there's not a lot we can do about that. The new messages
2265 * will overwrite the start of what we dump. */
2266
2267 raw_spin_lock_irqsave(&logbuf_lock, flags);
2268 if (syslog_seq < log_first_seq)
2269 idx = syslog_idx;
2270 else
2271 idx = log_first_idx;
2272
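/*
 * The log is a ring buffer, so the range being dumped may wrap around
 * the end of log_buf; describe it to the dumpers as up to two
 * contiguous segments (s1/l1 and s2/l2).
 */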
2273 if (idx > log_next_idx) {
2274 s1 = log_buf;
2275 l1 = log_next_idx;
2276
2277 s2 = log_buf + idx;
2278 l2 = log_buf_len - idx;
2279 } else {
2280 s1 = "";
2281 l1 = 0;
2282
2283 s2 = log_buf + idx;
2284 l2 = log_next_idx - idx;
2285 }
2286 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2287
2288 rcu_read_lock();
2289 list_for_each_entry_rcu(dumper, &dump_list, list)
2290 dumper->dump(dumper, reason, s1, l1, s2, l2);
2291 rcu_read_unlock();
2292 }
2293 #endif