/*
 * drivers/staging/android/logger.c
 *
 * A Logging Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "logger: " fmt

#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/aio.h>
#include "logger.h"

#include <asm/ioctls.h>

/**
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 * @buffer:	The actual ring buffer
 * @misc:	The "misc" device representing the log
 * @wq:		The wait queue for @readers
 * @readers:	This log's readers
 * @mutex:	The mutex that protects the @buffer
 * @w_off:	The current write head offset
 * @head:	The head, or location that readers start reading at.
 * @size:	The size of the log
 * @logs:	The list of log channels
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 */
struct logger_log {
	unsigned char		*buffer;
	struct miscdevice	misc;
	wait_queue_head_t	wq;
	struct list_head	readers;
	struct mutex		mutex;
	size_t			w_off;
	size_t			head;
	size_t			size;
	struct list_head	logs;
};

static LIST_HEAD(log_list);


/**
 * struct logger_reader - a logging device open for reading
 * @log:	The associated log
 * @list:	The associated entry in @logger_log's list
 * @r_off:	The current read head offset.
 * @r_all:	Reader can read all entries
 * @r_ver:	Reader ABI version
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
	struct logger_log	*log;
	struct list_head	list;
	size_t			r_off;
	bool			r_all;
	int			r_ver;
};

/* logger_offset - returns index 'n' into the log via (optimized) modulus */
static size_t logger_offset(struct logger_log *log, size_t n)
{
	return n & (log->size - 1);
}
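
/*
 * For example, with the 256 KiB logs created in logger_init(), log->size
 * is 0x40000, so the AND above masks with 0x3ffff and an offset that runs
 * past the end wraps back to the start: logger_offset(log, 0x40010) == 0x10.
 * This only works because every buffer size is a power of two, which is the
 * requirement documented above create_log().
 */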


/*
 * file_get_log - Given a file structure, return the associated log
 *
 * This isn't aesthetic. We have several goals:
 *
 *	1) Need to quickly obtain the associated log during an I/O operation
 *	2) Readers need to maintain state (logger_reader)
 *	3) Writers need to be very fast (open() should be a near no-op)
 *
 * In the reader case, we can trivially go file->logger_reader->logger_log.
 * For a writer, we don't want to maintain a logger_reader, so we just go
 * file->logger_log. Thus what file->private_data points at depends on whether
 * or not the file was opened for reading. This function hides that dirtiness.
 */
static inline struct logger_log *file_get_log(struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;

		return reader->log;
	}
	return file->private_data;
}
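
/*
 * Concretely, logger_open() below stores the struct logger_log itself in
 * private_data for write-only opens, and a freshly allocated logger_reader
 * (whose ->log points back at the log) when FMODE_READ is set, which is
 * why both branches above end up returning the same underlying log.
 */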

/*
 * get_entry_header - returns a pointer to the logger_entry header within
 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
 * be provided. Typically the return value will be a pointer within
 * 'log->buffer'. However, a pointer to 'scratch' may be returned if
 * the log entry spans the end and beginning of the circular buffer.
 */
static struct logger_entry *get_entry_header(struct logger_log *log,
		size_t off, struct logger_entry *scratch)
{
	size_t len = min(sizeof(struct logger_entry), log->size - off);

	if (len != sizeof(struct logger_entry)) {
		memcpy(((void *) scratch), log->buffer + off, len);
		memcpy(((void *) scratch) + len, log->buffer,
			sizeof(struct logger_entry) - len);
		return scratch;
	}

	return (struct logger_entry *) (log->buffer + off);
}
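
/*
 * For example, if an entry's header begins a few bytes before the end of
 * the buffer, only those bytes are contiguous; the remaining header bytes
 * live at the very start of the buffer, so the two memcpy() calls above
 * reassemble the full header in 'scratch' and return that copy instead of
 * a pointer into log->buffer.
 */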

/*
 * get_entry_msg_len - Grabs the length of the message of the entry
 * starting from 'off'.
 *
 * An entry length is 2 bytes (16 bits) in host endian order.
 * In the log, the length does not include the size of the log entry structure.
 * This function returns that message length; the log entry structure is
 * accounted for separately by the callers.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
{
	struct logger_entry scratch;
	struct logger_entry *entry;

	entry = get_entry_header(log, off, &scratch);
	return entry->len;
}

static size_t get_user_hdr_len(int ver)
{
	if (ver < 2)
		return sizeof(struct user_logger_entry_compat);
	return sizeof(struct logger_entry);
}

static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
		char __user *buf)
{
	void *hdr;
	size_t hdr_len;
	struct user_logger_entry_compat v1;

	if (ver < 2) {
		v1.len = entry->len;
		v1.__pad = 0;
		v1.pid = entry->pid;
		v1.tid = entry->tid;
		v1.sec = entry->sec;
		v1.nsec = entry->nsec;
		hdr = &v1;
		hdr_len = sizeof(struct user_logger_entry_compat);
	} else {
		hdr = entry;
		hdr_len = sizeof(struct logger_entry);
	}

	return copy_to_user(buf, hdr, hdr_len);
}
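
/*
 * A reader starts out with r_ver == 1 (see logger_open()), so it receives
 * the smaller user_logger_entry_compat header until it switches versions
 * with the LOGGER_SET_VERSION ioctl handled below; only versions 1 and 2
 * are accepted. Note that copy_to_user() returns the number of bytes it
 * could not copy, so any non-zero return from this helper is treated as a
 * fault by its callers.
 */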

/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
		struct logger_reader *reader,
		char __user *buf,
		size_t count)
{
	struct logger_entry scratch;
	struct logger_entry *entry;
	size_t len;
	size_t msg_start;

	/*
	 * First, copy the header to userspace, using the version of
	 * the header requested
	 */
	entry = get_entry_header(log, reader->r_off, &scratch);
	if (copy_header_to_user(reader->r_ver, entry, buf))
		return -EFAULT;

	count -= get_user_hdr_len(reader->r_ver);
	buf += get_user_hdr_len(reader->r_ver);
	msg_start = logger_offset(log,
		reader->r_off + sizeof(struct logger_entry));

	/*
	 * We read from the msg in two disjoint operations. First, we read from
	 * the current msg head offset up to 'count' bytes or to the end of
	 * the log, whichever comes first.
	 */
	len = min(count, log->size - msg_start);
	if (copy_to_user(buf, log->buffer + msg_start, len))
		return -EFAULT;

	/*
	 * Second, we read any remaining bytes, starting back at the head of
	 * the log.
	 */
	if (count != len)
		if (copy_to_user(buf + len, log->buffer, count - len))
			return -EFAULT;

	reader->r_off = logger_offset(log, reader->r_off +
		sizeof(struct logger_entry) + count);

	return count + get_user_hdr_len(reader->r_ver);
}
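
/*
 * Wrap-around sketch: with a 256 KiB (0x40000-byte) buffer, a message that
 * starts at msg_start == 0x3fff0 with a 0x30-byte payload is copied out in
 * two pieces: 0x10 bytes from the tail of the buffer, then the remaining
 * 0x20 bytes from offset 0. Either way r_off advances past one header plus
 * payload, wrapping through logger_offset().
 */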

/*
 * get_next_entry_by_uid - Starting at 'off', returns an offset into
 * 'log->buffer' which contains the first entry readable by 'euid'
 */
static size_t get_next_entry_by_uid(struct logger_log *log,
		size_t off, kuid_t euid)
{
	while (off != log->w_off) {
		struct logger_entry *entry;
		struct logger_entry scratch;
		size_t next_len;

		entry = get_entry_header(log, off, &scratch);

		if (uid_eq(entry->euid, euid))
			return off;

		next_len = sizeof(struct logger_entry) + entry->len;
		off = logger_offset(log, off + next_len);
	}

	return off;
}

/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 *	- O_NONBLOCK works
 *	- If there are no log entries to read, blocks until log is written to
 *	- Atomically reads exactly one log entry
 *
 * Will set errno to EINVAL if the read buffer is insufficient to hold the
 * next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
		size_t count, loff_t *pos)
{
	struct logger_reader *reader = file->private_data;
	struct logger_log *log = reader->log;
	ssize_t ret;
	DEFINE_WAIT(wait);

start:
	while (1) {
		mutex_lock(&log->mutex);

		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&log->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&log->mutex);

	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
		goto start;
	}

	/* get the size of the next entry */
	ret = get_user_hdr_len(reader->r_ver) +
		get_entry_msg_len(log, reader->r_off);
	if (count < ret) {
		ret = -EINVAL;
		goto out;
	}

	/* get exactly one entry from the log */
	ret = do_read_log_to_user(log, reader, buf, ret);

out:
	mutex_unlock(&log->mutex);

	return ret;
}
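
/*
 * A minimal user-space reading loop might look like the sketch below
 * (hypothetical example, not part of the driver; it assumes the logger.h
 * definitions are visible to user space and that LOGGER_LOG_MAIN names the
 * misc device node under /dev):
 *
 *	unsigned char buf[sizeof(struct logger_entry) + LOGGER_ENTRY_MAX_PAYLOAD];
 *	int fd = open("/dev/" LOGGER_LOG_MAIN, O_RDONLY);
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		handle_entry((struct logger_entry *)buf);
 *
 * Each successful read() returns exactly one header-plus-payload entry; a
 * buffer smaller than the next entry makes read() fail with EINVAL, which
 * is why the buffer covers the largest possible entry. (handle_entry() is
 * a placeholder for whatever the consumer does with the data.)
 */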

/*
 * get_next_entry - return the offset of the first valid entry at least 'len'
 * bytes after 'off'.
 *
 * Caller must hold log->mutex.
 */
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
	size_t count = 0;

	do {
		size_t nr = sizeof(struct logger_entry) +
			get_entry_msg_len(log, off);
		off = logger_offset(log, off + nr);
		count += nr;
	} while (count < len);

	return off;
}

/*
 * is_between - is a < c < b, accounting for wrapping of a, b, and c
 *		positions in the buffer
 *
 * That is, if a<b, check for c between a and b
 * and if a>b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *                        or c^
 */
static inline int is_between(size_t a, size_t b, size_t c)
{
	if (a < b) {
		/* is c between a and b? */
		if (a < c && c <= b)
			return 1;
	} else {
		/* is c outside of b through a? */
		if (c <= b || a < c)
			return 1;
	}

	return 0;
}
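
/*
 * Numeric check: with a == 100 and b == 200, the first branch reports
 * c == 150 as "between". With a wrapped window, say a == 200 and b == 100,
 * both c == 50 (before b) and c == 250 (after a) count as between, while
 * c == 150 does not, matching the second diagram above.
 */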

/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(log, old + len);
	struct logger_reader *reader;

	if (is_between(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	list_for_each_entry(reader, &log->readers, list)
		if (is_between(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}
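
/*
 * In other words: the next write will consume the byte range between the
 * old and new write offsets, so any reader (or the global head) whose
 * offset falls inside that range would otherwise point into a partially
 * overwritten entry; get_next_entry() skips it forward to the first entry
 * boundary at or beyond the new write head.
 */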

/*
 * logger_write_iter - our write method, implementing support for write(),
 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
 * them above all else.
 */
static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct logger_log *log = file_get_log(iocb->ki_filp);
	struct logger_entry header;
	struct timespec now;
	size_t len, count, w_off;

	count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);

	now = current_kernel_time();

	header.pid = current->tgid;
	header.tid = current->pid;
	header.sec = now.tv_sec;
	header.nsec = now.tv_nsec;
	header.euid = current_euid();
	header.len = count;
	header.hdr_size = sizeof(struct logger_entry);

	/* null writes succeed, return zero */
	if (unlikely(!header.len))
		return 0;

	mutex_lock(&log->mutex);

	/*
	 * Fix up any readers, pulling them forward to the first readable
	 * entry after (what will be) the new write offset. We do this now
	 * because if we partially fail, we can end up with clobbered log
	 * entries that encroach on readable buffer.
	 */
	fix_up_readers(log, sizeof(struct logger_entry) + header.len);

	len = min(sizeof(header), log->size - log->w_off);
	memcpy(log->buffer + log->w_off, &header, len);
	memcpy(log->buffer, (char *)&header + len, sizeof(header) - len);

	/*
	 * The payload starts just past the header written above. Work with a
	 * local copy of the write offset so that log->w_off is only committed
	 * once the whole entry is in place.
	 */
	w_off = logger_offset(log, log->w_off + sizeof(struct logger_entry));

	len = min(count, log->size - w_off);

	if (copy_from_iter(log->buffer + w_off, len, from) != len) {
		/*
		 * Note that by not updating log->w_off, this abandons the
		 * portion of the new entry that *was* successfully
		 * copied, just above. This is intentional to avoid
		 * message corruption from missing fragments.
		 */
		mutex_unlock(&log->mutex);
		return -EFAULT;
	}

	if (copy_from_iter(log->buffer, count - len, from) != count - len) {
		mutex_unlock(&log->mutex);
		return -EFAULT;
	}

	log->w_off = logger_offset(log, w_off + count);
	mutex_unlock(&log->mutex);

	/* wake up any blocked readers */
	wake_up_interruptible(&log->wq);

	return len;
}
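
/*
 * From user space, one write()/writev() call becomes exactly one log entry
 * regardless of how the payload is split across iovecs. A hypothetical
 * caller (the prio/tag/message layout is a common convention, not something
 * this driver interprets) might do:
 *
 *	struct iovec vec[3] = {
 *		{ &prio, 1 },
 *		{ tag, strlen(tag) + 1 },
 *		{ msg, strlen(msg) + 1 },
 *	};
 *	writev(fd, vec, 3);
 *
 * The payload is silently truncated to LOGGER_ENTRY_MAX_PAYLOAD bytes, and
 * the pid/tid/timestamp/euid fields are filled in by the kernel above, not
 * by the caller.
 */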

static struct logger_log *get_log_from_minor(int minor)
{
	struct logger_log *log;

	list_for_each_entry(log, &log_list, logs)
		if (log->misc.minor == minor)
			return log;
	return NULL;
}

/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 */
static int logger_open(struct inode *inode, struct file *file)
{
	struct logger_log *log;
	int ret;

	ret = nonseekable_open(inode, file);
	if (ret)
		return ret;

	log = get_log_from_minor(MINOR(inode->i_rdev));
	if (!log)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader;

		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
		if (!reader)
			return -ENOMEM;

		reader->log = log;
		reader->r_ver = 1;
		reader->r_all = in_egroup_p(inode->i_gid) ||
			capable(CAP_SYSLOG);

		INIT_LIST_HEAD(&reader->list);

		mutex_lock(&log->mutex);
		reader->r_off = log->head;
		list_add_tail(&reader->list, &log->readers);
		mutex_unlock(&log->mutex);

		file->private_data = reader;
	} else
		file->private_data = log;

	return 0;
}

/*
 * logger_release - the log's release file operation
 *
 * Note this is a total no-op in the write-only case. Keep it that way!
 */
static int logger_release(struct inode *ignored, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		struct logger_log *log = reader->log;

		mutex_lock(&log->mutex);
		list_del(&reader->list);
		mutex_unlock(&log->mutex);

		kfree(reader);
	}

	return 0;
}

/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
	struct logger_reader *reader;
	struct logger_log *log;
	unsigned int ret = POLLOUT | POLLWRNORM;

	if (!(file->f_mode & FMODE_READ))
		return ret;

	reader = file->private_data;
	log = reader->log;

	poll_wait(file, &log->wq, wait);

	mutex_lock(&log->mutex);
	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	if (log->w_off != reader->r_off)
		ret |= POLLIN | POLLRDNORM;
	mutex_unlock(&log->mutex);

	return ret;
}
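
/*
 * Because of the lapping race described above, a poll()-driven consumer is
 * well served by keeping the descriptor in O_NONBLOCK mode, e.g. (sketch,
 * not part of the driver):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 *
 * where a read() that loses the race simply returns -1 with EAGAIN rather
 * than blocking.
 */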

static long logger_set_version(struct logger_reader *reader, void __user *arg)
{
	int version;

	if (copy_from_user(&version, arg, sizeof(int)))
		return -EFAULT;

	if ((version < 1) || (version > 2))
		return -EINVAL;

	reader->r_ver = version;
	return 0;
}

static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -EINVAL;
	void __user *argp = (void __user *) arg;

	mutex_lock(&log->mutex);

	switch (cmd) {
	case LOGGER_GET_LOG_BUF_SIZE:
		ret = log->size;
		break;
	case LOGGER_GET_LOG_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off >= reader->r_off)
			ret = log->w_off - reader->r_off;
		else
			ret = (log->size - reader->r_off) + log->w_off;
		break;
	case LOGGER_GET_NEXT_ENTRY_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;

		if (!reader->r_all)
			reader->r_off = get_next_entry_by_uid(log,
				reader->r_off, current_euid());

		if (log->w_off != reader->r_off)
			ret = get_user_hdr_len(reader->r_ver) +
				get_entry_msg_len(log, reader->r_off);
		else
			ret = 0;
		break;
	case LOGGER_FLUSH_LOG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EBADF;
			break;
		}
		if (!(in_egroup_p(file_inode(file)->i_gid) ||
				capable(CAP_SYSLOG))) {
			ret = -EPERM;
			break;
		}
		list_for_each_entry(reader, &log->readers, list)
			reader->r_off = log->w_off;
		log->head = log->w_off;
		ret = 0;
		break;
	case LOGGER_GET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = reader->r_ver;
		break;
	case LOGGER_SET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = logger_set_version(reader, argp);
		break;
	}

	mutex_unlock(&log->mutex);

	return ret;
}
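
/*
 * User-space sketch of the ioctl interface (hypothetical example; the
 * LOGGER_* request codes are expected to come from logger.h):
 *
 *	long buf_size = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);
 *	long unread   = ioctl(fd, LOGGER_GET_LOG_LEN);
 *	int ver = 2;
 *
 *	ioctl(fd, LOGGER_SET_VERSION, &ver);
 *
 * Switching to version 2 makes subsequent read()s return the full
 * struct logger_entry header instead of user_logger_entry_compat, and
 * LOGGER_FLUSH_LOG (writers only, with the right group or CAP_SYSLOG)
 * discards everything currently buffered.
 */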

static const struct file_operations logger_fops = {
	.owner = THIS_MODULE,
	.read = logger_read,
	.write_iter = logger_write_iter,
	.poll = logger_poll,
	.unlocked_ioctl = logger_ioctl,
	.compat_ioctl = logger_ioctl,
	.open = logger_open,
	.release = logger_release,
};

/*
 * Log size must be a power of two, and greater than
 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
 */
static int __init create_log(char *log_name, int size)
{
	int ret = 0;
	struct logger_log *log;
	unsigned char *buffer;

	buffer = vmalloc(size);
	if (buffer == NULL)
		return -ENOMEM;

	log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
	if (log == NULL) {
		ret = -ENOMEM;
		goto out_free_buffer;
	}
	log->buffer = buffer;

	log->misc.minor = MISC_DYNAMIC_MINOR;
	log->misc.name = kstrdup(log_name, GFP_KERNEL);
	if (log->misc.name == NULL) {
		ret = -ENOMEM;
		goto out_free_log;
	}

	log->misc.fops = &logger_fops;
	log->misc.parent = NULL;

	init_waitqueue_head(&log->wq);
	INIT_LIST_HEAD(&log->readers);
	mutex_init(&log->mutex);
	log->w_off = 0;
	log->head = 0;
	log->size = size;

	INIT_LIST_HEAD(&log->logs);
	list_add_tail(&log->logs, &log_list);

	/* finally, initialize the misc device for this log */
	ret = misc_register(&log->misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device for log '%s'!\n",
			log->misc.name);
		goto out_free_misc_name;
	}

	pr_info("created %luK log '%s'\n",
		(unsigned long) log->size >> 10, log->misc.name);

	return 0;

out_free_misc_name:
	kfree(log->misc.name);

out_free_log:
	kfree(log);

out_free_buffer:
	vfree(buffer);
	return ret;
}
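
/*
 * The power-of-two requirement is what lets logger_offset() reduce to a
 * bitwise AND, and requiring the size to exceed a header plus
 * LOGGER_ENTRY_MAX_PAYLOAD guarantees that a single entry can never wrap
 * far enough to overwrite itself. The 256 KiB buffers requested by
 * logger_init() below satisfy both constraints.
 */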

static int __init logger_init(void)
{
	int ret;

	ret = create_log(LOGGER_LOG_MAIN, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_RADIO, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
	if (unlikely(ret))
		goto out;

out:
	return ret;
}
static void __exit logger_exit(void)
{
	struct logger_log *current_log, *next_log;

	list_for_each_entry_safe(current_log, next_log, &log_list, logs) {
		/* we have to delete all the entries inside log_list */
		misc_deregister(&current_log->misc);
		vfree(current_log->buffer);
		kfree(current_log->misc.name);
		list_del(&current_log->logs);
		kfree(current_log);
	}
}


device_initcall(logger_init);
module_exit(logger_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Love, <rlove@google.com>");
MODULE_DESCRIPTION("Android Logger");