/*
 * drivers/misc/logger.c
 *
 * A Logging Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "logger: " fmt

#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/aio.h>
#include "logger.h"

#include <asm/ioctls.h>
37 * struct logger_log - represents a specific log, such as 'main' or 'radio'
38 * @buffer: The actual ring buffer
39 * @misc: The "misc" device representing the log
40 * @wq: The wait queue for @readers
41 * @readers: This log's readers
42 * @mutex: The mutex that protects the @buffer
43 * @w_off: The current write head offset
44 * @head: The head, or location that readers start reading at.
45 * @size: The size of the log
46 * @logs: The list of log channels
48 * This structure lives from module insertion until module removal, so it does
49 * not need additional reference counting. The structure is protected by the
53 unsigned char *buffer
;
54 struct miscdevice misc
;
56 struct list_head readers
;
61 struct list_head logs
;
64 static LIST_HEAD(log_list
);
68 * struct logger_reader - a logging device open for reading
69 * @log: The associated log
70 * @list: The associated entry in @logger_log's list
71 * @r_off: The current read head offset.
72 * @r_all: Reader can read all entries
73 * @r_ver: Reader ABI version
75 * This object lives from open to release, so we don't need additional
76 * reference counting. The structure is protected by log->mutex.
78 struct logger_reader
{
79 struct logger_log
*log
;
80 struct list_head list
;
86 /* logger_offset - returns index 'n' into the log via (optimized) modulus */
87 static size_t logger_offset(struct logger_log
*log
, size_t n
)
89 return n
& (log
->size
- 1);
94 * file_get_log - Given a file structure, return the associated log
96 * This isn't aesthetic. We have several goals:
98 * 1) Need to quickly obtain the associated log during an I/O operation
99 * 2) Readers need to maintain state (logger_reader)
100 * 3) Writers need to be very fast (open() should be a near no-op)
102 * In the reader case, we can trivially go file->logger_reader->logger_log.
103 * For a writer, we don't want to maintain a logger_reader, so we just go
104 * file->logger_log. Thus what file->private_data points at depends on whether
105 * or not the file was opened for reading. This function hides that dirtiness.
107 static inline struct logger_log
*file_get_log(struct file
*file
)
109 if (file
->f_mode
& FMODE_READ
) {
110 struct logger_reader
*reader
= file
->private_data
;
114 return file
->private_data
;
118 * get_entry_header - returns a pointer to the logger_entry header within
119 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
120 * be provided. Typically the return value will be a pointer within
121 * 'logger->buf'. However, a pointer to 'scratch' may be returned if
122 * the log entry spans the end and beginning of the circular buffer.
124 static struct logger_entry
*get_entry_header(struct logger_log
*log
,
125 size_t off
, struct logger_entry
*scratch
)
127 size_t len
= min(sizeof(struct logger_entry
), log
->size
- off
);
129 if (len
!= sizeof(struct logger_entry
)) {
130 memcpy(((void *) scratch
), log
->buffer
+ off
, len
);
131 memcpy(((void *) scratch
) + len
, log
->buffer
,
132 sizeof(struct logger_entry
) - len
);
136 return (struct logger_entry
*) (log
->buffer
+ off
);
140 * get_entry_msg_len - Grabs the length of the message of the entry
141 * starting from from 'off'.
143 * An entry length is 2 bytes (16 bits) in host endian order.
144 * In the log, the length does not include the size of the log entry structure.
145 * This function returns the size including the log entry structure.
147 * Caller needs to hold log->mutex.
149 static __u32
get_entry_msg_len(struct logger_log
*log
, size_t off
)
151 struct logger_entry scratch
;
152 struct logger_entry
*entry
;
154 entry
= get_entry_header(log
, off
, &scratch
);
158 static size_t get_user_hdr_len(int ver
)
161 return sizeof(struct user_logger_entry_compat
);
162 return sizeof(struct logger_entry
);
165 static ssize_t
copy_header_to_user(int ver
, struct logger_entry
*entry
,
170 struct user_logger_entry_compat v1
;
178 v1
.nsec
= entry
->nsec
;
180 hdr_len
= sizeof(struct user_logger_entry_compat
);
183 hdr_len
= sizeof(struct logger_entry
);
186 return copy_to_user(buf
, hdr
, hdr_len
);
190 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
191 * user-space buffer 'buf'. Returns 'count' on success.
193 * Caller must hold log->mutex.
195 static ssize_t
do_read_log_to_user(struct logger_log
*log
,
196 struct logger_reader
*reader
,
200 struct logger_entry scratch
;
201 struct logger_entry
*entry
;
206 * First, copy the header to userspace, using the version of
207 * the header requested
209 entry
= get_entry_header(log
, reader
->r_off
, &scratch
);
210 if (copy_header_to_user(reader
->r_ver
, entry
, buf
))
213 count
-= get_user_hdr_len(reader
->r_ver
);
214 buf
+= get_user_hdr_len(reader
->r_ver
);
215 msg_start
= logger_offset(log
,
216 reader
->r_off
+ sizeof(struct logger_entry
));
219 * We read from the msg in two disjoint operations. First, we read from
220 * the current msg head offset up to 'count' bytes or to the end of
221 * the log, whichever comes first.
223 len
= min(count
, log
->size
- msg_start
);
224 if (copy_to_user(buf
, log
->buffer
+ msg_start
, len
))
228 * Second, we read any remaining bytes, starting back at the head of
232 if (copy_to_user(buf
+ len
, log
->buffer
, count
- len
))
235 reader
->r_off
= logger_offset(log
, reader
->r_off
+
236 sizeof(struct logger_entry
) + count
);
238 return count
+ get_user_hdr_len(reader
->r_ver
);
242 * get_next_entry_by_uid - Starting at 'off', returns an offset into
243 * 'log->buffer' which contains the first entry readable by 'euid'
245 static size_t get_next_entry_by_uid(struct logger_log
*log
,
246 size_t off
, kuid_t euid
)
248 while (off
!= log
->w_off
) {
249 struct logger_entry
*entry
;
250 struct logger_entry scratch
;
253 entry
= get_entry_header(log
, off
, &scratch
);
255 if (uid_eq(entry
->euid
, euid
))
258 next_len
= sizeof(struct logger_entry
) + entry
->len
;
259 off
= logger_offset(log
, off
+ next_len
);
266 * logger_read - our log's read() method
271 * - If there are no log entries to read, blocks until log is written to
272 * - Atomically reads exactly one log entry
274 * Will set errno to EINVAL if read
275 * buffer is insufficient to hold next entry.
277 static ssize_t
logger_read(struct file
*file
, char __user
*buf
,
278 size_t count
, loff_t
*pos
)
280 struct logger_reader
*reader
= file
->private_data
;
281 struct logger_log
*log
= reader
->log
;
287 mutex_lock(&log
->mutex
);
289 prepare_to_wait(&log
->wq
, &wait
, TASK_INTERRUPTIBLE
);
291 ret
= (log
->w_off
== reader
->r_off
);
292 mutex_unlock(&log
->mutex
);
296 if (file
->f_flags
& O_NONBLOCK
) {
301 if (signal_pending(current
)) {
309 finish_wait(&log
->wq
, &wait
);
313 mutex_lock(&log
->mutex
);
316 reader
->r_off
= get_next_entry_by_uid(log
,
317 reader
->r_off
, current_euid());
319 /* is there still something to read or did we race? */
320 if (unlikely(log
->w_off
== reader
->r_off
)) {
321 mutex_unlock(&log
->mutex
);
325 /* get the size of the next entry */
326 ret
= get_user_hdr_len(reader
->r_ver
) +
327 get_entry_msg_len(log
, reader
->r_off
);
333 /* get exactly one entry from the log */
334 ret
= do_read_log_to_user(log
, reader
, buf
, ret
);
337 mutex_unlock(&log
->mutex
);
343 * get_next_entry - return the offset of the first valid entry at least 'len'
346 * Caller must hold log->mutex.
348 static size_t get_next_entry(struct logger_log
*log
, size_t off
, size_t len
)
353 size_t nr
= sizeof(struct logger_entry
) +
354 get_entry_msg_len(log
, off
);
355 off
= logger_offset(log
, off
+ nr
);
357 } while (count
< len
);
/*
 * is_between - is a < c < b, accounting for wrapping of a, b, and c
 * positions in the buffer
 *
 * That is, if a<b, check for c between a and b
 * and if a>b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *  or                    c^
 */
static inline int is_between(size_t a, size_t b, size_t c)
{
	if (a < b) {
		/* is c between a and b? */
		if (a < c && c <= b)
			return 1;
	} else {
		/* is c outside of b through a? */
		if (c <= b || a < c)
			return 1;
	}

	return 0;
}
392 * fix_up_readers - walk the list of all readers and "fix up" any who were
393 * lapped by the writer; also do the same for the default "start head".
394 * We do this by "pulling forward" the readers and start head to the first
395 * entry after the new write head.
397 * The caller needs to hold log->mutex.
399 static void fix_up_readers(struct logger_log
*log
, size_t len
)
401 size_t old
= log
->w_off
;
402 size_t new = logger_offset(log
, old
+ len
);
403 struct logger_reader
*reader
;
405 if (is_between(old
, new, log
->head
))
406 log
->head
= get_next_entry(log
, log
->head
, len
);
408 list_for_each_entry(reader
, &log
->readers
, list
)
409 if (is_between(old
, new, reader
->r_off
))
410 reader
->r_off
= get_next_entry(log
, reader
->r_off
, len
);
414 * logger_write_iter - our write method, implementing support for write(),
415 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
416 * them above all else.
418 static ssize_t
logger_write_iter(struct kiocb
*iocb
, struct iov_iter
*from
)
420 struct logger_log
*log
= file_get_log(iocb
->ki_filp
);
421 struct logger_entry header
;
425 count
= min_t(size_t, iocb
->ki_nbytes
, LOGGER_ENTRY_MAX_PAYLOAD
);
427 now
= current_kernel_time();
429 header
.pid
= current
->tgid
;
430 header
.tid
= current
->pid
;
431 header
.sec
= now
.tv_sec
;
432 header
.nsec
= now
.tv_nsec
;
433 header
.euid
= current_euid();
435 header
.hdr_size
= sizeof(struct logger_entry
);
437 /* null writes succeed, return zero */
438 if (unlikely(!header
.len
))
441 mutex_lock(&log
->mutex
);
444 * Fix up any readers, pulling them forward to the first readable
445 * entry after (what will be) the new write offset. We do this now
446 * because if we partially fail, we can end up with clobbered log
447 * entries that encroach on readable buffer.
449 fix_up_readers(log
, sizeof(struct logger_entry
) + header
.len
);
451 len
= min(sizeof(header
), log
->size
- log
->w_off
);
452 memcpy(log
->buffer
+ log
->w_off
, &header
, len
);
453 memcpy(log
->buffer
, (char *)&header
+ len
, sizeof(header
) - len
);
455 len
= min(count
, log
->size
- log
->w_off
);
457 if (copy_from_iter(log
->buffer
+ log
->w_off
, len
, from
) != len
) {
459 * Note that by not updating w_off, this abandons the
460 * portion of the new entry that *was* successfully
461 * copied, just above. This is intentional to avoid
462 * message corruption from missing fragments.
464 mutex_unlock(&log
->mutex
);
468 if (copy_from_iter(log
->buffer
, count
- len
, from
) != count
- len
) {
469 mutex_unlock(&log
->mutex
);
473 log
->w_off
= logger_offset(log
, log
->w_off
+ count
);
474 mutex_unlock(&log
->mutex
);
476 /* wake up any blocked readers */
477 wake_up_interruptible(&log
->wq
);
482 static struct logger_log
*get_log_from_minor(int minor
)
484 struct logger_log
*log
;
486 list_for_each_entry(log
, &log_list
, logs
)
487 if (log
->misc
.minor
== minor
)
493 * logger_open - the log's open() file operation
495 * Note how near a no-op this is in the write-only case. Keep it that way!
497 static int logger_open(struct inode
*inode
, struct file
*file
)
499 struct logger_log
*log
;
502 ret
= nonseekable_open(inode
, file
);
506 log
= get_log_from_minor(MINOR(inode
->i_rdev
));
510 if (file
->f_mode
& FMODE_READ
) {
511 struct logger_reader
*reader
;
513 reader
= kmalloc(sizeof(struct logger_reader
), GFP_KERNEL
);
519 reader
->r_all
= in_egroup_p(inode
->i_gid
) ||
522 INIT_LIST_HEAD(&reader
->list
);
524 mutex_lock(&log
->mutex
);
525 reader
->r_off
= log
->head
;
526 list_add_tail(&reader
->list
, &log
->readers
);
527 mutex_unlock(&log
->mutex
);
529 file
->private_data
= reader
;
531 file
->private_data
= log
;
537 * logger_release - the log's release file operation
539 * Note this is a total no-op in the write-only case. Keep it that way!
541 static int logger_release(struct inode
*ignored
, struct file
*file
)
543 if (file
->f_mode
& FMODE_READ
) {
544 struct logger_reader
*reader
= file
->private_data
;
545 struct logger_log
*log
= reader
->log
;
547 mutex_lock(&log
->mutex
);
548 list_del(&reader
->list
);
549 mutex_unlock(&log
->mutex
);
558 * logger_poll - the log's poll file operation, for poll/select/epoll
560 * Note we always return POLLOUT, because you can always write() to the log.
561 * Note also that, strictly speaking, a return value of POLLIN does not
562 * guarantee that the log is readable without blocking, as there is a small
563 * chance that the writer can lap the reader in the interim between poll()
564 * returning and the read() request.
566 static unsigned int logger_poll(struct file
*file
, poll_table
*wait
)
568 struct logger_reader
*reader
;
569 struct logger_log
*log
;
570 unsigned int ret
= POLLOUT
| POLLWRNORM
;
572 if (!(file
->f_mode
& FMODE_READ
))
575 reader
= file
->private_data
;
578 poll_wait(file
, &log
->wq
, wait
);
580 mutex_lock(&log
->mutex
);
582 reader
->r_off
= get_next_entry_by_uid(log
,
583 reader
->r_off
, current_euid());
585 if (log
->w_off
!= reader
->r_off
)
586 ret
|= POLLIN
| POLLRDNORM
;
587 mutex_unlock(&log
->mutex
);
592 static long logger_set_version(struct logger_reader
*reader
, void __user
*arg
)
596 if (copy_from_user(&version
, arg
, sizeof(int)))
599 if ((version
< 1) || (version
> 2))
602 reader
->r_ver
= version
;
606 static long logger_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
608 struct logger_log
*log
= file_get_log(file
);
609 struct logger_reader
*reader
;
611 void __user
*argp
= (void __user
*) arg
;
613 mutex_lock(&log
->mutex
);
616 case LOGGER_GET_LOG_BUF_SIZE
:
619 case LOGGER_GET_LOG_LEN
:
620 if (!(file
->f_mode
& FMODE_READ
)) {
624 reader
= file
->private_data
;
625 if (log
->w_off
>= reader
->r_off
)
626 ret
= log
->w_off
- reader
->r_off
;
628 ret
= (log
->size
- reader
->r_off
) + log
->w_off
;
630 case LOGGER_GET_NEXT_ENTRY_LEN
:
631 if (!(file
->f_mode
& FMODE_READ
)) {
635 reader
= file
->private_data
;
638 reader
->r_off
= get_next_entry_by_uid(log
,
639 reader
->r_off
, current_euid());
641 if (log
->w_off
!= reader
->r_off
)
642 ret
= get_user_hdr_len(reader
->r_ver
) +
643 get_entry_msg_len(log
, reader
->r_off
);
647 case LOGGER_FLUSH_LOG
:
648 if (!(file
->f_mode
& FMODE_WRITE
)) {
652 if (!(in_egroup_p(file_inode(file
)->i_gid
) ||
653 capable(CAP_SYSLOG
))) {
657 list_for_each_entry(reader
, &log
->readers
, list
)
658 reader
->r_off
= log
->w_off
;
659 log
->head
= log
->w_off
;
662 case LOGGER_GET_VERSION
:
663 if (!(file
->f_mode
& FMODE_READ
)) {
667 reader
= file
->private_data
;
670 case LOGGER_SET_VERSION
:
671 if (!(file
->f_mode
& FMODE_READ
)) {
675 reader
= file
->private_data
;
676 ret
= logger_set_version(reader
, argp
);
680 mutex_unlock(&log
->mutex
);
685 static const struct file_operations logger_fops
= {
686 .owner
= THIS_MODULE
,
688 .write_iter
= logger_write_iter
,
690 .unlocked_ioctl
= logger_ioctl
,
691 .compat_ioctl
= logger_ioctl
,
693 .release
= logger_release
,
697 * Log size must must be a power of two, and greater than
698 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
700 static int __init
create_log(char *log_name
, int size
)
703 struct logger_log
*log
;
704 unsigned char *buffer
;
706 buffer
= vmalloc(size
);
710 log
= kzalloc(sizeof(struct logger_log
), GFP_KERNEL
);
713 goto out_free_buffer
;
715 log
->buffer
= buffer
;
717 log
->misc
.minor
= MISC_DYNAMIC_MINOR
;
718 log
->misc
.name
= kstrdup(log_name
, GFP_KERNEL
);
719 if (log
->misc
.name
== NULL
) {
724 log
->misc
.fops
= &logger_fops
;
725 log
->misc
.parent
= NULL
;
727 init_waitqueue_head(&log
->wq
);
728 INIT_LIST_HEAD(&log
->readers
);
729 mutex_init(&log
->mutex
);
734 INIT_LIST_HEAD(&log
->logs
);
735 list_add_tail(&log
->logs
, &log_list
);
737 /* finally, initialize the misc device for this log */
738 ret
= misc_register(&log
->misc
);
740 pr_err("failed to register misc device for log '%s'!\n",
742 goto out_free_misc_name
;
745 pr_info("created %luK log '%s'\n",
746 (unsigned long) log
->size
>> 10, log
->misc
.name
);
751 kfree(log
->misc
.name
);
761 static int __init
logger_init(void)
765 ret
= create_log(LOGGER_LOG_MAIN
, 256*1024);
769 ret
= create_log(LOGGER_LOG_EVENTS
, 256*1024);
773 ret
= create_log(LOGGER_LOG_RADIO
, 256*1024);
777 ret
= create_log(LOGGER_LOG_SYSTEM
, 256*1024);
785 static void __exit
logger_exit(void)
787 struct logger_log
*current_log
, *next_log
;
789 list_for_each_entry_safe(current_log
, next_log
, &log_list
, logs
) {
790 /* we have to delete all the entry inside log_list */
791 misc_deregister(¤t_log
->misc
);
792 vfree(current_log
->buffer
);
793 kfree(current_log
->misc
.name
);
794 list_del(¤t_log
->logs
);
800 device_initcall(logger_init
);
801 module_exit(logger_exit
);
803 MODULE_LICENSE("GPL");
804 MODULE_AUTHOR("Robert Love, <rlove@google.com>");
805 MODULE_DESCRIPTION("Android Logger");