staging: android: logger: Change logger_offset() from macro to function
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / android / logger.c
CommitLineData
355b0502
GKH
1/*
2 * drivers/misc/logger.c
3 *
4 * A Logging Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 *
8 * Robert Love <rlove@google.com>
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
23687af9 20#include <linux/sched.h>
355b0502
GKH
21#include <linux/module.h>
22#include <linux/fs.h>
23#include <linux/miscdevice.h>
24#include <linux/uaccess.h>
25#include <linux/poll.h>
c11a166c 26#include <linux/slab.h>
355b0502
GKH
27#include <linux/time.h>
28#include "logger.h"
29
30#include <asm/ioctls.h>
31
/*
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 *
 * 'size' must be a power of two: logger_offset() masks with (size - 1) to
 * wrap offsets. All instances are created via DEFINE_LOGGER_DEVICE, which
 * documents that requirement.
 */
struct logger_log {
	unsigned char *buffer;	/* the ring buffer itself */
	struct miscdevice misc;	/* misc device representing the log */
	wait_queue_head_t wq;	/* wait queue for readers */
	struct list_head readers; /* this log's readers */
	struct mutex mutex;	/* mutex protecting buffer */
	size_t w_off;		/* current write head offset */
	size_t head;		/* new readers start here */
	size_t size;		/* size of the log */
};
49
/*
 * struct logger_reader - a logging device open for reading
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
	struct logger_log *log;	/* associated log */
	struct list_head list;	/* entry in logger_log's list */
	size_t r_off;		/* current read head offset */
};
61
62/* logger_offset - returns index 'n' into the log via (optimized) modulus */
c626224d
TB
63size_t logger_offset(struct logger_log *log, size_t n)
64{
65 return n & (log->size-1);
66}
67
355b0502
GKH
68
69/*
70 * file_get_log - Given a file structure, return the associated log
71 *
72 * This isn't aesthetic. We have several goals:
73 *
277cdd01
MN
74 * 1) Need to quickly obtain the associated log during an I/O operation
75 * 2) Readers need to maintain state (logger_reader)
76 * 3) Writers need to be very fast (open() should be a near no-op)
355b0502
GKH
77 *
78 * In the reader case, we can trivially go file->logger_reader->logger_log.
79 * For a writer, we don't want to maintain a logger_reader, so we just go
80 * file->logger_log. Thus what file->private_data points at depends on whether
81 * or not the file was opened for reading. This function hides that dirtiness.
82 */
83static inline struct logger_log *file_get_log(struct file *file)
84{
85 if (file->f_mode & FMODE_READ) {
86 struct logger_reader *reader = file->private_data;
87 return reader->log;
88 } else
89 return file->private_data;
90}
91
92/*
93 * get_entry_len - Grabs the length of the payload of the next entry starting
94 * from 'off'.
95 *
96 * Caller needs to hold log->mutex.
97 */
98static __u32 get_entry_len(struct logger_log *log, size_t off)
99{
100 __u16 val;
101
102 switch (log->size - off) {
103 case 1:
104 memcpy(&val, log->buffer + off, 1);
105 memcpy(((char *) &val) + 1, log->buffer, 1);
106 break;
107 default:
108 memcpy(&val, log->buffer + off, 2);
109 }
110
111 return sizeof(struct logger_entry) + val;
112}
113
/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success, -EFAULT if the
 * user buffer cannot be written.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
				   struct logger_reader *reader,
				   char __user *buf,
				   size_t count)
{
	size_t len;

	/*
	 * We read from the log in two disjoint operations. First, we read from
	 * the current read head offset up to 'count' bytes or to the end of
	 * the log, whichever comes first.
	 */
	len = min(count, log->size - reader->r_off);
	if (copy_to_user(buf, log->buffer + reader->r_off, len))
		return -EFAULT;

	/*
	 * Second, we read any remaining bytes, starting back at the head of
	 * the log.
	 */
	if (count != len)
		if (copy_to_user(buf + len, log->buffer, count - len))
			return -EFAULT;

	/* advance the read head, wrapping modulo the buffer size */
	reader->r_off = logger_offset(log, reader->r_off + count);

	return count;
}
148
/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 *	- O_NONBLOCK works
 *	- If there are no log entries to read, blocks until log is written to
 *	- Atomically reads exactly one log entry
 *
 * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
 * buffer is insufficient to hold next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
			   size_t count, loff_t *pos)
{
	struct logger_reader *reader = file->private_data;
	struct logger_log *log = reader->log;
	ssize_t ret;
	DEFINE_WAIT(wait);

start:
	while (1) {
		/*
		 * Register on the wait queue before testing for data so a
		 * writer's wake-up between the test and schedule() is not
		 * lost (standard prepare_to_wait/schedule pattern).
		 */
		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&log->mutex);
		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&log->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&log->mutex);

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
		goto start;
	}

	/* get the size of the next entry */
	ret = get_entry_len(log, reader->r_off);
	if (count < ret) {
		ret = -EINVAL;
		goto out;
	}

	/* get exactly one entry from the log */
	ret = do_read_log_to_user(log, reader, buf, ret);

out:
	mutex_unlock(&log->mutex);

	return ret;
}
219
220/*
221 * get_next_entry - return the offset of the first valid entry at least 'len'
222 * bytes after 'off'.
223 *
224 * Caller must hold log->mutex.
225 */
226static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
227{
228 size_t count = 0;
229
230 do {
231 size_t nr = get_entry_len(log, off);
c626224d 232 off = logger_offset(log, off + nr);
355b0502
GKH
233 count += nr;
234 } while (count < len);
235
236 return off;
237}
238
/*
 * clock_interval - is a < c < b in mod-space? Put another way, does the line
 * from a to b cross c?
 */
static inline int clock_interval(size_t a, size_t b, size_t c)
{
	/* wrapped interval: crossing happens past a OR at/before b */
	if (b < a)
		return (a < c || b >= c) ? 1 : 0;

	/* ordinary interval: c must lie strictly after a and at/before b */
	return (a < c && b >= c) ? 1 : 0;
}
255
/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(log, old + len);
	struct logger_reader *reader;

	/* would the upcoming write [old, new) run over the start head? */
	if (clock_interval(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	/* pull forward every reader the write would lap */
	list_for_each_entry(reader, &log->readers, list)
		if (clock_interval(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}
277
/*
 * do_write_log - writes 'len' bytes from 'buf' to 'log'
 *
 * The write may wrap around the end of the ring buffer, in which case it is
 * split into two memcpy()s. The source is kernel memory, so no copy here can
 * fault.
 *
 * The caller needs to hold log->mutex.
 */
static void do_write_log(struct logger_log *log, const void *buf, size_t count)
{
	size_t len;

	/* first chunk: from the write head up to the end of the buffer */
	len = min(count, log->size - log->w_off);
	memcpy(log->buffer + log->w_off, buf, len);

	/* second chunk: whatever wrapped around to the start */
	if (count != len)
		memcpy(log->buffer, buf + len, count - len);

	/* advance the write head, wrapping modulo the buffer size */
	log->w_off = logger_offset(log, log->w_off + count);

}
296
/*
 * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to
 * the log 'log'
 *
 * The caller needs to hold log->mutex.
 *
 * Returns 'count' on success, negative error code on failure. On failure the
 * buffer may be partially clobbered, but log->w_off is only advanced after
 * both copies succeed, so the caller can recover by restoring w_off.
 */
static ssize_t do_write_log_from_user(struct logger_log *log,
				      const void __user *buf, size_t count)
{
	size_t len;

	/* first chunk: from the write head up to the end of the buffer */
	len = min(count, log->size - log->w_off);
	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
		return -EFAULT;

	/* second chunk: remainder wraps to the start of the buffer */
	if (count != len)
		if (copy_from_user(log->buffer, buf + len, count - len))
			return -EFAULT;

	log->w_off = logger_offset(log, log->w_off + count);

	return count;
}
322
323/*
324 * logger_aio_write - our write method, implementing support for write(),
325 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
326 * them above all else.
327 */
328ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
329 unsigned long nr_segs, loff_t ppos)
330{
331 struct logger_log *log = file_get_log(iocb->ki_filp);
332 size_t orig = log->w_off;
333 struct logger_entry header;
334 struct timespec now;
335 ssize_t ret = 0;
336
337 now = current_kernel_time();
338
339 header.pid = current->tgid;
340 header.tid = current->pid;
341 header.sec = now.tv_sec;
342 header.nsec = now.tv_nsec;
343 header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
344
345 /* null writes succeed, return zero */
346 if (unlikely(!header.len))
347 return 0;
348
349 mutex_lock(&log->mutex);
350
351 /*
352 * Fix up any readers, pulling them forward to the first readable
353 * entry after (what will be) the new write offset. We do this now
354 * because if we partially fail, we can end up with clobbered log
355 * entries that encroach on readable buffer.
356 */
357 fix_up_readers(log, sizeof(struct logger_entry) + header.len);
358
359 do_write_log(log, &header, sizeof(struct logger_entry));
360
361 while (nr_segs-- > 0) {
362 size_t len;
363 ssize_t nr;
364
365 /* figure out how much of this vector we can keep */
366 len = min_t(size_t, iov->iov_len, header.len - ret);
367
368 /* write out this segment's payload */
369 nr = do_write_log_from_user(log, iov->iov_base, len);
370 if (unlikely(nr < 0)) {
371 log->w_off = orig;
372 mutex_unlock(&log->mutex);
373 return nr;
374 }
375
376 iov++;
377 ret += nr;
378 }
379
380 mutex_unlock(&log->mutex);
381
382 /* wake up any blocked readers */
383 wake_up_interruptible(&log->wq);
384
385 return ret;
386}
387
388static struct logger_log *get_log_from_minor(int);
389
/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 */
static int logger_open(struct inode *inode, struct file *file)
{
	struct logger_log *log;
	int ret;

	/* logger devices are not seekable */
	ret = nonseekable_open(inode, file);
	if (ret)
		return ret;

	log = get_log_from_minor(MINOR(inode->i_rdev));
	if (!log)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader;

		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
		if (!reader)
			return -ENOMEM;

		reader->log = log;
		INIT_LIST_HEAD(&reader->list);

		/* new readers start at log->head; list registration happens
		 * under the mutex that protects log->readers */
		mutex_lock(&log->mutex);
		reader->r_off = log->head;
		list_add_tail(&reader->list, &log->readers);
		mutex_unlock(&log->mutex);

		file->private_data = reader;
	} else
		file->private_data = log;

	return 0;
}
429
430/*
431 * logger_release - the log's release file operation
432 *
433 * Note this is a total no-op in the write-only case. Keep it that way!
434 */
435static int logger_release(struct inode *ignored, struct file *file)
436{
437 if (file->f_mode & FMODE_READ) {
438 struct logger_reader *reader = file->private_data;
439 list_del(&reader->list);
440 kfree(reader);
441 }
442
443 return 0;
444}
445
/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
	struct logger_reader *reader;
	struct logger_log *log;
	unsigned int ret = POLLOUT | POLLWRNORM;

	/* write-only opens are always writable, never readable */
	if (!(file->f_mode & FMODE_READ))
		return ret;

	reader = file->private_data;
	log = reader->log;

	poll_wait(file, &log->wq, wait);

	/* readable iff this reader has not caught up with the write head */
	mutex_lock(&log->mutex);
	if (log->w_off != reader->r_off)
		ret |= POLLIN | POLLRDNORM;
	mutex_unlock(&log->mutex);

	return ret;
}
476
/*
 * logger_ioctl - the log's ioctl() method
 *
 * All commands run under log->mutex; unrecognized commands return -ENOTTY.
 */
static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -ENOTTY;

	mutex_lock(&log->mutex);

	switch (cmd) {
	case LOGGER_GET_LOG_BUF_SIZE:
		/* total capacity of the ring buffer */
		ret = log->size;
		break;
	case LOGGER_GET_LOG_LEN:
		/* bytes of unread data available to this reader */
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off >= reader->r_off)
			ret = log->w_off - reader->r_off;
		else
			/* write head has wrapped past the end of the buffer */
			ret = (log->size - reader->r_off) + log->w_off;
		break;
	case LOGGER_GET_NEXT_ENTRY_LEN:
		/* size of the next entry, or 0 if nothing is pending */
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off != reader->r_off)
			ret = get_entry_len(log, reader->r_off);
		else
			ret = 0;
		break;
	case LOGGER_FLUSH_LOG:
		/* discard all unread data: fast-forward every reader and the
		 * default start head to the current write head */
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EBADF;
			break;
		}
		list_for_each_entry(reader, &log->readers, list)
			reader->r_off = log->w_off;
		log->head = log->w_off;
		ret = 0;
		break;
	}

	mutex_unlock(&log->mutex);

	return ret;
}
527
/*
 * File operations for the logger devices. Writes go through the aio path,
 * so write(), writev(), and aio_write() all share logger_aio_write().
 */
static const struct file_operations logger_fops = {
	.owner = THIS_MODULE,
	.read = logger_read,
	.aio_write = logger_aio_write,
	.poll = logger_poll,
	.unlocked_ioctl = logger_ioctl,
	.compat_ioctl = logger_ioctl,
	.open = logger_open,
	.release = logger_release,
};
538
/*
 * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
 * must be a power of two (logger_offset masks with SIZE - 1), greater than
 * LOGGER_ENTRY_MAX_LEN, and less than LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
 */
#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
static unsigned char _buf_ ## VAR[SIZE]; \
static struct logger_log VAR = { \
	.buffer = _buf_ ## VAR, \
	.misc = { \
		.minor = MISC_DYNAMIC_MINOR, \
		.name = NAME, \
		.fops = &logger_fops, \
		.parent = NULL, \
	}, \
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
	.readers = LIST_HEAD_INIT(VAR .readers), \
	.mutex = __MUTEX_INITIALIZER(VAR .mutex), \
	.w_off = 0, \
	.head = 0, \
	.size = SIZE, \
};

/* The four Android log devices, 256 KiB each (a power of two, as required). */
DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
355b0502
GKH
566
567static struct logger_log *get_log_from_minor(int minor)
568{
569 if (log_main.misc.minor == minor)
570 return &log_main;
571 if (log_events.misc.minor == minor)
572 return &log_events;
573 if (log_radio.misc.minor == minor)
574 return &log_radio;
3537cdaa
SM
575 if (log_system.misc.minor == minor)
576 return &log_system;
355b0502
GKH
577 return NULL;
578}
579
580static int __init init_log(struct logger_log *log)
581{
582 int ret;
583
584 ret = misc_register(&log->misc);
585 if (unlikely(ret)) {
586 printk(KERN_ERR "logger: failed to register misc "
587 "device for log '%s'!\n", log->misc.name);
588 return ret;
589 }
590
591 printk(KERN_INFO "logger: created %luK log '%s'\n",
592 (unsigned long) log->size >> 10, log->misc.name);
593
594 return 0;
595}
596
597static int __init logger_init(void)
598{
599 int ret;
600
601 ret = init_log(&log_main);
602 if (unlikely(ret))
603 goto out;
604
605 ret = init_log(&log_events);
606 if (unlikely(ret))
607 goto out;
608
609 ret = init_log(&log_radio);
610 if (unlikely(ret))
611 goto out;
612
3537cdaa
SM
613 ret = init_log(&log_system);
614 if (unlikely(ret))
615 goto out;
616
355b0502
GKH
617out:
618 return ret;
619}
620device_initcall(logger_init);