staging: android: logger: Allocate logs dynamically at boot (v3)
drivers/staging/android/logger.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)

/*
 * drivers/misc/logger.c
 *
 * A Logging Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include "logger.h"

#include <asm/ioctls.h>

/*
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 */
struct logger_log {
	unsigned char *buffer;	/* the ring buffer itself */
	struct miscdevice misc;	/* misc device representing the log */
	wait_queue_head_t wq;	/* wait queue for readers */
	struct list_head readers; /* this log's readers */
	struct mutex mutex;	/* mutex protecting buffer */
	size_t w_off;		/* current write head offset */
	size_t head;		/* new readers start here */
	size_t size;		/* size of the log */
	struct list_head logs;	/* list of log channels (myself) */
};

static LIST_HEAD(log_list);

/*
 * struct logger_reader - a logging device open for reading
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
	struct logger_log *log;	/* associated log */
	struct list_head list;	/* entry in logger_log's list */
	size_t r_off;		/* current read head offset */
};

/* logger_offset - returns index 'n' into the log via (optimized) modulus */
size_t logger_offset(struct logger_log *log, size_t n)
{
	return n & (log->size - 1);
}

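/*
 * Worked example: because every log size is a power of two (see the
 * comment above create_log() below), the AND above is equivalent to
 * 'n % log->size' but avoids a division. For a 256 KiB log, size is
 * 0x40000 and size - 1 is 0x3ffff, so an offset of 0x40006 wraps to 0x6.
 */
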
/*
 * file_get_log - Given a file structure, return the associated log
 *
 * This isn't aesthetic. We have several goals:
 *
 *	1) Need to quickly obtain the associated log during an I/O operation
 *	2) Readers need to maintain state (logger_reader)
 *	3) Writers need to be very fast (open() should be a near no-op)
 *
 * In the reader case, we can trivially go file->logger_reader->logger_log.
 * For a writer, we don't want to maintain a logger_reader, so we just go
 * file->logger_log. Thus what file->private_data points at depends on whether
 * or not the file was opened for reading. This function hides that dirtiness.
 */
static inline struct logger_log *file_get_log(struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		return reader->log;
	} else
		return file->private_data;
}

/*
 * get_entry_len - Grabs the length of the payload of the next entry starting
 * from 'off'.
 *
 * An entry length is 2 bytes (16 bits) in host endian order.
 * In the log, the length does not include the size of the log entry structure.
 * This function returns the size including the log entry structure.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_len(struct logger_log *log, size_t off)
{
	__u16 val;

	/* copy 2 bytes from buffer, in memcpy order, */
	/* handling possible wrap at end of buffer */

	((__u8 *)&val)[0] = log->buffer[off];
	if (likely(off + 1 < log->size))
		((__u8 *)&val)[1] = log->buffer[off + 1];
	else
		((__u8 *)&val)[1] = log->buffer[0];

	return sizeof(struct logger_entry) + val;
}

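/*
 * For reference, a sketch of the entry layout this function walks. The
 * actual definition lives in logger.h and is assumed here to be the
 * classic Android one; what matters above is that the 16-bit 'len' field
 * is the first member, so the two byte loads pick it up even when the
 * entry wraps around the end of the buffer:
 *
 *	struct logger_entry {
 *		__u16	len;	// length of the payload
 *		__u16	__pad;	// explicit padding
 *		__s32	pid;	// generating process's pid
 *		__s32	tid;	// generating process's tid
 *		__s32	sec;	// seconds since Epoch
 *		__s32	nsec;	// nanoseconds
 *		char	msg[0];	// the payload itself
 *	};
 */
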
/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
				   struct logger_reader *reader,
				   char __user *buf,
				   size_t count)
{
	size_t len;

	/*
	 * We read from the log in two disjoint operations. First, we read from
	 * the current read head offset up to 'count' bytes or to the end of
	 * the log, whichever comes first.
	 */
	len = min(count, log->size - reader->r_off);
	if (copy_to_user(buf, log->buffer + reader->r_off, len))
		return -EFAULT;

	/*
	 * Second, we read any remaining bytes, starting back at the head of
	 * the log.
	 */
	if (count != len)
		if (copy_to_user(buf + len, log->buffer, count - len))
			return -EFAULT;

	reader->r_off = logger_offset(log, reader->r_off + count);

	return count;
}

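/*
 * Worked example of the wrap handling above, for a 256 KiB log: with
 * r_off = 0x3fff0 and count = 0x20, the first copy_to_user() takes the
 * 0x10 bytes up to the end of the buffer, the second takes the remaining
 * 0x10 bytes from offset 0, and r_off then advances to
 * logger_offset(log, 0x40010), i.e. 0x10.
 */
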
/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 *	- O_NONBLOCK works
 *	- If there are no log entries to read, blocks until log is written to
 *	- Atomically reads exactly one log entry
 *
 * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
 * buffer is insufficient to hold next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
			   size_t count, loff_t *pos)
{
	struct logger_reader *reader = file->private_data;
	struct logger_log *log = reader->log;
	ssize_t ret;
	DEFINE_WAIT(wait);

start:
	while (1) {
		mutex_lock(&log->mutex);

		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&log->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&log->mutex);

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
		goto start;
	}

	/* get the size of the next entry */
	ret = get_entry_len(log, reader->r_off);
	if (count < ret) {
		ret = -EINVAL;
		goto out;
	}

	/* get exactly one entry from the log */
	ret = do_read_log_to_user(log, reader, buf, ret);

out:
	mutex_unlock(&log->mutex);

	return ret;
}

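/*
 * A minimal user-space read sketch (illustrative only, not part of the
 * driver). Each read() returns exactly one entry, header included. The
 * device node name depends on how the platform exposes the misc device
 * registered for LOGGER_LOG_MAIN; /dev/log/main is the usual Android
 * convention:
 *
 *	unsigned char buf[LOGGER_ENTRY_MAX_LEN];
 *	int fd = open("/dev/log/main", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	// buf now holds one struct logger_entry followed by its payload
 */
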
/*
 * get_next_entry - return the offset of the first valid entry at least 'len'
 * bytes after 'off'.
 *
 * Caller must hold log->mutex.
 */
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
	size_t count = 0;

	do {
		size_t nr = get_entry_len(log, off);
		off = logger_offset(log, off + nr);
		count += nr;
	} while (count < len);

	return off;
}

/*
 * is_between - is a < c < b, accounting for wrapping of a, b, and c
 * positions in the buffer
 *
 * That is, if a < b, check for c between a and b
 * and if a > b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *                        or c^
 */
static inline int is_between(size_t a, size_t b, size_t c)
{
	if (a < b) {
		/* is c between a and b? */
		if (a < c && c <= b)
			return 1;
	} else {
		/* is c outside of b through a? */
		if (c <= b || a < c)
			return 1;
	}

	return 0;
}

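/*
 * Worked example: in a 16-byte log with old write head a = 12 and new
 * write head b = 4 (the write wrapped), is_between(12, 4, c) is true for
 * c in 13..15 and 0..4, exactly the region the new entry overwrites.
 * fix_up_readers() below relies on this to detect lapped readers.
 */
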
/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(log, old + len);
	struct logger_reader *reader;

	if (is_between(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	list_for_each_entry(reader, &log->readers, list)
		if (is_between(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}

/*
 * do_write_log - writes 'count' bytes from 'buf' to 'log'
 *
 * The caller needs to hold log->mutex.
 */
static void do_write_log(struct logger_log *log, const void *buf, size_t count)
{
	size_t len;

	len = min(count, log->size - log->w_off);
	memcpy(log->buffer + log->w_off, buf, len);

	if (count != len)
		memcpy(log->buffer, buf + len, count - len);

	log->w_off = logger_offset(log, log->w_off + count);
}

/*
 * do_write_log_from_user - writes 'count' bytes from the user-space buffer
 * 'buf' to the log 'log'
 *
 * The caller needs to hold log->mutex.
 *
 * Returns 'count' on success, negative error code on failure.
 */
static ssize_t do_write_log_from_user(struct logger_log *log,
				      const void __user *buf, size_t count)
{
	size_t len;

	len = min(count, log->size - log->w_off);
	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
		return -EFAULT;

	if (count != len)
		if (copy_from_user(log->buffer, buf + len, count - len))
			/*
			 * Note that by not updating w_off, this abandons the
			 * portion of the new entry that *was* successfully
			 * copied, just above. This is intentional to avoid
			 * message corruption from missing fragments.
			 */
			return -EFAULT;

	log->w_off = logger_offset(log, log->w_off + count);

	return count;
}

/*
 * logger_aio_write - our write method, implementing support for write(),
 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
 * them above all else.
 */
ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t ppos)
{
	struct logger_log *log = file_get_log(iocb->ki_filp);
	size_t orig = log->w_off;
	struct logger_entry header;
	struct timespec now;
	ssize_t ret = 0;

	now = current_kernel_time();

	header.pid = current->tgid;
	header.tid = current->pid;
	header.sec = now.tv_sec;
	header.nsec = now.tv_nsec;
	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);

	/* null writes succeed, return zero */
	if (unlikely(!header.len))
		return 0;

	mutex_lock(&log->mutex);

	/*
	 * Fix up any readers, pulling them forward to the first readable
	 * entry after (what will be) the new write offset. We do this now
	 * because if we partially fail, we can end up with clobbered log
	 * entries that encroach on readable buffer.
	 */
	fix_up_readers(log, sizeof(struct logger_entry) + header.len);

	do_write_log(log, &header, sizeof(struct logger_entry));

	while (nr_segs-- > 0) {
		size_t len;
		ssize_t nr;

		/* figure out how much of this vector we can keep */
		len = min_t(size_t, iov->iov_len, header.len - ret);

		/* write out this segment's payload */
		nr = do_write_log_from_user(log, iov->iov_base, len);
		if (unlikely(nr < 0)) {
			log->w_off = orig;
			mutex_unlock(&log->mutex);
			return nr;
		}

		iov++;
		ret += nr;
	}

	mutex_unlock(&log->mutex);

	/* wake up any blocked readers */
	wake_up_interruptible(&log->wq);

	return ret;
}

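/*
 * A minimal user-space write sketch (illustrative only, not part of the
 * driver; 'fd' is assumed to be a log device opened for writing). The
 * driver simply concatenates the iovecs into one entry's payload; the
 * priority/tag/message split shown here is the convention used by
 * Android's liblog, not something this driver enforces:
 *
 *	unsigned char prio = 4;			// ANDROID_LOG_INFO
 *	const char *tag = "example";
 *	const char *msg = "hello from user space";
 *	struct iovec vec[3] = {
 *		{ &prio, 1 },
 *		{ (void *)tag, strlen(tag) + 1 },
 *		{ (void *)msg, strlen(msg) + 1 },
 *	};
 *	ssize_t n = writev(fd, vec, 3);		// one atomic log entry
 */
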
static struct logger_log *get_log_from_minor(int minor)
{
	struct logger_log *log;

	list_for_each_entry(log, &log_list, logs)
		if (log->misc.minor == minor)
			return log;
	return NULL;
}

/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 */
static int logger_open(struct inode *inode, struct file *file)
{
	struct logger_log *log;
	int ret;

	ret = nonseekable_open(inode, file);
	if (ret)
		return ret;

	log = get_log_from_minor(MINOR(inode->i_rdev));
	if (!log)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader;

		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
		if (!reader)
			return -ENOMEM;

		reader->log = log;
		INIT_LIST_HEAD(&reader->list);

		mutex_lock(&log->mutex);
		reader->r_off = log->head;
		list_add_tail(&reader->list, &log->readers);
		mutex_unlock(&log->mutex);

		file->private_data = reader;
	} else
		file->private_data = log;

	return 0;
}

/*
 * logger_release - the log's release file operation
 *
 * Note this is a total no-op in the write-only case. Keep it that way!
 */
static int logger_release(struct inode *ignored, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		struct logger_log *log = reader->log;

		mutex_lock(&log->mutex);
		list_del(&reader->list);
		mutex_unlock(&log->mutex);

		kfree(reader);
	}

	return 0;
}

/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
	struct logger_reader *reader;
	struct logger_log *log;
	unsigned int ret = POLLOUT | POLLWRNORM;

	if (!(file->f_mode & FMODE_READ))
		return ret;

	reader = file->private_data;
	log = reader->log;

	poll_wait(file, &log->wq, wait);

	mutex_lock(&log->mutex);
	if (log->w_off != reader->r_off)
		ret |= POLLIN | POLLRDNORM;
	mutex_unlock(&log->mutex);

	return ret;
}

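/*
 * Example poll() usage from user space (illustrative only; 'fd' is a log
 * device opened for reading). As noted above, POLLIN is only a hint and
 * the subsequent read() may still block if the writer laps this reader in
 * the meantime:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 */
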
static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -ENOTTY;

	mutex_lock(&log->mutex);

	switch (cmd) {
	case LOGGER_GET_LOG_BUF_SIZE:
		ret = log->size;
		break;
	case LOGGER_GET_LOG_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off >= reader->r_off)
			ret = log->w_off - reader->r_off;
		else
			ret = (log->size - reader->r_off) + log->w_off;
		break;
	case LOGGER_GET_NEXT_ENTRY_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off != reader->r_off)
			ret = get_entry_len(log, reader->r_off);
		else
			ret = 0;
		break;
	case LOGGER_FLUSH_LOG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EBADF;
			break;
		}
		list_for_each_entry(reader, &log->readers, list)
			reader->r_off = log->w_off;
		log->head = log->w_off;
		ret = 0;
		break;
	}

	mutex_unlock(&log->mutex);

	return ret;
}

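/*
 * Example ioctl usage from user space (illustrative only; the request
 * codes come from logger.h). The reader-state queries need a descriptor
 * opened for reading, while LOGGER_FLUSH_LOG needs one opened for
 * writing ('wfd' below):
 *
 *	long bufsize = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);	// ring size
 *	long unread = ioctl(fd, LOGGER_GET_LOG_LEN);		// unread bytes
 *	ioctl(wfd, LOGGER_FLUSH_LOG);				// discard everything
 */
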
static const struct file_operations logger_fops = {
	.owner = THIS_MODULE,
	.read = logger_read,
	.aio_write = logger_aio_write,
	.poll = logger_poll,
	.unlocked_ioctl = logger_ioctl,
	.compat_ioctl = logger_ioctl,
	.open = logger_open,
	.release = logger_release,
};

/*
 * Log size must be a power of two, greater than LOGGER_ENTRY_MAX_LEN,
 * and less than LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
 */
static int __init create_log(char *log_name, int size)
{
	int ret = 0;
	struct logger_log *log;
	unsigned char *buffer;

	buffer = vmalloc(size);
	if (buffer == NULL)
		return -ENOMEM;

	log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
	if (log == NULL) {
		ret = -ENOMEM;
		goto out_free_buffer;
	}
	log->buffer = buffer;

	log->misc.minor = MISC_DYNAMIC_MINOR;
	log->misc.name = kstrdup(log_name, GFP_KERNEL);
	if (log->misc.name == NULL) {
		ret = -ENOMEM;
		goto out_free_log;
	}

	log->misc.fops = &logger_fops;
	log->misc.parent = NULL;

	init_waitqueue_head(&log->wq);
	INIT_LIST_HEAD(&log->readers);
	mutex_init(&log->mutex);
	log->w_off = 0;
	log->head = 0;
	log->size = size;

	INIT_LIST_HEAD(&log->logs);
	list_add_tail(&log->logs, &log_list);

	/* finally, initialize the misc device for this log */
	ret = misc_register(&log->misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "logger: failed to register misc "
		       "device for log '%s'!\n", log->misc.name);
		goto out_free_log;
	}

	printk(KERN_INFO "logger: created %luK log '%s'\n",
	       (unsigned long) log->size >> 10, log->misc.name);

	return 0;

out_free_log:
	kfree(log);

out_free_buffer:
	vfree(buffer);
	return ret;
}

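/*
 * Adding another channel is just another create_log() call from
 * logger_init() below, for example (hypothetical name; the size must
 * respect the power-of-two constraint documented above create_log()):
 *
 *	ret = create_log("log_vendor", 64*1024);
 */
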
static int __init logger_init(void)
{
	int ret;

	ret = create_log(LOGGER_LOG_MAIN, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_RADIO, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
	if (unlikely(ret))
		goto out;

out:
	return ret;
}
device_initcall(logger_init);