ftrace: add ftrace_graph_stop()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / trace / ring_buffer.c
CommitLineData
7a8e76a3
SR
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
182e9f5f
SR
19#include "trace.h"
20
033601a3
SR
21/*
22 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to.
25 * Turning this switch on, makes it OK to write to the
26 * ring buffer, if the ring buffer is enabled itself.
27 *
28 * There's three layers that must be on in order to write
29 * to the ring buffer.
30 *
31 * 1) This global flag must be set.
32 * 2) The ring buffer must be enabled for recording.
33 * 3) The per cpu buffer must be enabled for recording.
34 *
35 * In case of an anomaly, this global flag has a bit set that
36 * will permantly disable all ring buffers.
37 */
38
39/*
40 * Global flag to disable all recording to ring buffers
41 * This has two bits: ON, DISABLED
42 *
43 * ON DISABLED
44 * ---- ----------
45 * 0 0 : ring buffers are off
46 * 1 0 : ring buffers are on
47 * X 1 : ring buffers are permanently disabled
48 */
49
50enum {
51 RB_BUFFERS_ON_BIT = 0,
52 RB_BUFFERS_DISABLED_BIT = 1,
53};
54
55enum {
56 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
58};
59
60static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
a3583244
SR
61
62/**
63 * tracing_on - enable all tracing buffers
64 *
65 * This function enables all tracing buffers that may have been
66 * disabled with tracing_off.
67 */
68void tracing_on(void)
69{
033601a3 70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
a3583244
SR
71}
72
73/**
74 * tracing_off - turn off all tracing buffers
75 *
76 * This function stops all tracing buffers from recording data.
77 * It does not disable any overhead the tracers themselves may
78 * be causing. This function simply causes all recording to
79 * the ring buffers to fail.
80 */
81void tracing_off(void)
82{
033601a3
SR
83 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
84}
85
86/**
87 * tracing_off_permanent - permanently disable ring buffers
88 *
89 * This function, once called, will disable all ring buffers
90 * permanenty.
91 */
92void tracing_off_permanent(void)
93{
94 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
a3583244
SR
95}
96
d06bbd66
IM
97#include "trace.h"
98
7a8e76a3
SR
99/* Up this if you want to test the TIME_EXTENTS and normalization */
100#define DEBUG_SHIFT 0
101
102/* FIXME!!! */
103u64 ring_buffer_time_stamp(int cpu)
104{
47e74f2b
SR
105 u64 time;
106
107 preempt_disable_notrace();
7a8e76a3 108 /* shift to debug/test normalization and TIME_EXTENTS */
47e74f2b
SR
109 time = sched_clock() << DEBUG_SHIFT;
110 preempt_enable_notrace();
111
112 return time;
7a8e76a3
SR
113}
114
115void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
116{
117 /* Just stupid testing the normalize function and deltas */
118 *ts >>= DEBUG_SHIFT;
119}
120
121#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
122#define RB_ALIGNMENT_SHIFT 2
123#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
124#define RB_MAX_SMALL_DATA 28
125
126enum {
127 RB_LEN_TIME_EXTEND = 8,
128 RB_LEN_TIME_STAMP = 16,
129};
130
131/* inline for ring buffer fast paths */
132static inline unsigned
133rb_event_length(struct ring_buffer_event *event)
134{
135 unsigned length;
136
137 switch (event->type) {
138 case RINGBUF_TYPE_PADDING:
139 /* undefined */
140 return -1;
141
142 case RINGBUF_TYPE_TIME_EXTEND:
143 return RB_LEN_TIME_EXTEND;
144
145 case RINGBUF_TYPE_TIME_STAMP:
146 return RB_LEN_TIME_STAMP;
147
148 case RINGBUF_TYPE_DATA:
149 if (event->len)
150 length = event->len << RB_ALIGNMENT_SHIFT;
151 else
152 length = event->array[0];
153 return length + RB_EVNT_HDR_SIZE;
154 default:
155 BUG();
156 }
157 /* not hit */
158 return 0;
159}
160
161/**
162 * ring_buffer_event_length - return the length of the event
163 * @event: the event to get the length of
164 */
165unsigned ring_buffer_event_length(struct ring_buffer_event *event)
166{
167 return rb_event_length(event);
168}
169
170/* inline for ring buffer fast paths */
171static inline void *
172rb_event_data(struct ring_buffer_event *event)
173{
174 BUG_ON(event->type != RINGBUF_TYPE_DATA);
175 /* If length is in len field, then array[0] has the data */
176 if (event->len)
177 return (void *)&event->array[0];
178 /* Otherwise length is in array[0] and array[1] has the data */
179 return (void *)&event->array[1];
180}
181
182/**
183 * ring_buffer_event_data - return the data of the event
184 * @event: the event to get the data from
185 */
186void *ring_buffer_event_data(struct ring_buffer_event *event)
187{
188 return rb_event_data(event);
189}
190
191#define for_each_buffer_cpu(buffer, cpu) \
192 for_each_cpu_mask(cpu, buffer->cpumask)
193
194#define TS_SHIFT 27
195#define TS_MASK ((1ULL << TS_SHIFT) - 1)
196#define TS_DELTA_TEST (~TS_MASK)
197
abc9b56d 198struct buffer_data_page {
e4c2ce82 199 u64 time_stamp; /* page time stamp */
bf41a158 200 local_t commit; /* write commited index */
abc9b56d
SR
201 unsigned char data[]; /* data of buffer page */
202};
203
204struct buffer_page {
205 local_t write; /* index for next write */
6f807acd 206 unsigned read; /* index for next read */
e4c2ce82 207 struct list_head list; /* list of free pages */
abc9b56d 208 struct buffer_data_page *page; /* Actual data page */
7a8e76a3
SR
209};
210
abc9b56d
SR
211static void rb_init_page(struct buffer_data_page *page)
212{
213 local_set(&page->commit, 0);
214}
215
ed56829c
SR
216/*
217 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
218 * this issue out.
219 */
220static inline void free_buffer_page(struct buffer_page *bpage)
221{
e4c2ce82 222 if (bpage->page)
6ae2a076 223 free_page((unsigned long)bpage->page);
e4c2ce82 224 kfree(bpage);
ed56829c
SR
225}
226
7a8e76a3
SR
227/*
228 * We need to fit the time_stamp delta into 27 bits.
229 */
230static inline int test_time_stamp(u64 delta)
231{
232 if (delta & TS_DELTA_TEST)
233 return 1;
234 return 0;
235}
236
abc9b56d 237#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
7a8e76a3
SR
238
239/*
240 * head_page == tail_page && head == tail then buffer is empty.
241 */
242struct ring_buffer_per_cpu {
243 int cpu;
244 struct ring_buffer *buffer;
f83c9d0f 245 spinlock_t reader_lock; /* serialize readers */
3e03fb7f 246 raw_spinlock_t lock;
7a8e76a3
SR
247 struct lock_class_key lock_key;
248 struct list_head pages;
6f807acd
SR
249 struct buffer_page *head_page; /* read from head */
250 struct buffer_page *tail_page; /* write to tail */
bf41a158 251 struct buffer_page *commit_page; /* commited pages */
d769041f 252 struct buffer_page *reader_page;
7a8e76a3
SR
253 unsigned long overrun;
254 unsigned long entries;
255 u64 write_stamp;
256 u64 read_stamp;
257 atomic_t record_disabled;
258};
259
260struct ring_buffer {
261 unsigned long size;
262 unsigned pages;
263 unsigned flags;
264 int cpus;
265 cpumask_t cpumask;
266 atomic_t record_disabled;
267
268 struct mutex mutex;
269
270 struct ring_buffer_per_cpu **buffers;
271};
272
273struct ring_buffer_iter {
274 struct ring_buffer_per_cpu *cpu_buffer;
275 unsigned long head;
276 struct buffer_page *head_page;
277 u64 read_stamp;
278};
279
f536aafc 280/* buffer may be either ring_buffer or ring_buffer_per_cpu */
bf41a158 281#define RB_WARN_ON(buffer, cond) \
3e89c7bb
SR
282 ({ \
283 int _____ret = unlikely(cond); \
284 if (_____ret) { \
bf41a158
SR
285 atomic_inc(&buffer->record_disabled); \
286 WARN_ON(1); \
287 } \
3e89c7bb
SR
288 _____ret; \
289 })
f536aafc 290
7a8e76a3
SR
291/**
292 * check_pages - integrity check of buffer pages
293 * @cpu_buffer: CPU buffer with pages to test
294 *
295 * As a safty measure we check to make sure the data pages have not
296 * been corrupted.
297 */
298static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
299{
300 struct list_head *head = &cpu_buffer->pages;
301 struct buffer_page *page, *tmp;
302
3e89c7bb
SR
303 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
304 return -1;
305 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
306 return -1;
7a8e76a3
SR
307
308 list_for_each_entry_safe(page, tmp, head, list) {
3e89c7bb
SR
309 if (RB_WARN_ON(cpu_buffer,
310 page->list.next->prev != &page->list))
311 return -1;
312 if (RB_WARN_ON(cpu_buffer,
313 page->list.prev->next != &page->list))
314 return -1;
7a8e76a3
SR
315 }
316
317 return 0;
318}
319
7a8e76a3
SR
320static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
321 unsigned nr_pages)
322{
323 struct list_head *head = &cpu_buffer->pages;
324 struct buffer_page *page, *tmp;
325 unsigned long addr;
326 LIST_HEAD(pages);
327 unsigned i;
328
329 for (i = 0; i < nr_pages; i++) {
e4c2ce82 330 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
aa1e0e3b 331 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
e4c2ce82
SR
332 if (!page)
333 goto free_pages;
334 list_add(&page->list, &pages);
335
7a8e76a3
SR
336 addr = __get_free_page(GFP_KERNEL);
337 if (!addr)
338 goto free_pages;
e4c2ce82 339 page->page = (void *)addr;
abc9b56d 340 rb_init_page(page->page);
7a8e76a3
SR
341 }
342
343 list_splice(&pages, head);
344
345 rb_check_pages(cpu_buffer);
346
347 return 0;
348
349 free_pages:
350 list_for_each_entry_safe(page, tmp, &pages, list) {
351 list_del_init(&page->list);
ed56829c 352 free_buffer_page(page);
7a8e76a3
SR
353 }
354 return -ENOMEM;
355}
356
357static struct ring_buffer_per_cpu *
358rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
359{
360 struct ring_buffer_per_cpu *cpu_buffer;
e4c2ce82 361 struct buffer_page *page;
d769041f 362 unsigned long addr;
7a8e76a3
SR
363 int ret;
364
365 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
366 GFP_KERNEL, cpu_to_node(cpu));
367 if (!cpu_buffer)
368 return NULL;
369
370 cpu_buffer->cpu = cpu;
371 cpu_buffer->buffer = buffer;
f83c9d0f 372 spin_lock_init(&cpu_buffer->reader_lock);
3e03fb7f 373 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
7a8e76a3
SR
374 INIT_LIST_HEAD(&cpu_buffer->pages);
375
e4c2ce82
SR
376 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
377 GFP_KERNEL, cpu_to_node(cpu));
378 if (!page)
379 goto fail_free_buffer;
380
381 cpu_buffer->reader_page = page;
d769041f
SR
382 addr = __get_free_page(GFP_KERNEL);
383 if (!addr)
e4c2ce82
SR
384 goto fail_free_reader;
385 page->page = (void *)addr;
abc9b56d 386 rb_init_page(page->page);
e4c2ce82 387
d769041f 388 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
d769041f 389
7a8e76a3
SR
390 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
391 if (ret < 0)
d769041f 392 goto fail_free_reader;
7a8e76a3
SR
393
394 cpu_buffer->head_page
395 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
bf41a158 396 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
7a8e76a3
SR
397
398 return cpu_buffer;
399
d769041f
SR
400 fail_free_reader:
401 free_buffer_page(cpu_buffer->reader_page);
402
7a8e76a3
SR
403 fail_free_buffer:
404 kfree(cpu_buffer);
405 return NULL;
406}
407
408static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
409{
410 struct list_head *head = &cpu_buffer->pages;
411 struct buffer_page *page, *tmp;
412
d769041f
SR
413 list_del_init(&cpu_buffer->reader_page->list);
414 free_buffer_page(cpu_buffer->reader_page);
415
7a8e76a3
SR
416 list_for_each_entry_safe(page, tmp, head, list) {
417 list_del_init(&page->list);
ed56829c 418 free_buffer_page(page);
7a8e76a3
SR
419 }
420 kfree(cpu_buffer);
421}
422
a7b13743
SR
423/*
424 * Causes compile errors if the struct buffer_page gets bigger
425 * than the struct page.
426 */
427extern int ring_buffer_page_too_big(void);
428
7a8e76a3
SR
429/**
430 * ring_buffer_alloc - allocate a new ring_buffer
431 * @size: the size in bytes that is needed.
432 * @flags: attributes to set for the ring buffer.
433 *
434 * Currently the only flag that is available is the RB_FL_OVERWRITE
435 * flag. This flag means that the buffer will overwrite old data
436 * when the buffer wraps. If this flag is not set, the buffer will
437 * drop data when the tail hits the head.
438 */
439struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
440{
441 struct ring_buffer *buffer;
442 int bsize;
443 int cpu;
444
a7b13743
SR
445 /* Paranoid! Optimizes out when all is well */
446 if (sizeof(struct buffer_page) > sizeof(struct page))
447 ring_buffer_page_too_big();
448
449
7a8e76a3
SR
450 /* keep it in its own cache line */
451 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
452 GFP_KERNEL);
453 if (!buffer)
454 return NULL;
455
456 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
457 buffer->flags = flags;
458
459 /* need at least two pages */
460 if (buffer->pages == 1)
461 buffer->pages++;
462
463 buffer->cpumask = cpu_possible_map;
464 buffer->cpus = nr_cpu_ids;
465
466 bsize = sizeof(void *) * nr_cpu_ids;
467 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
468 GFP_KERNEL);
469 if (!buffer->buffers)
470 goto fail_free_buffer;
471
472 for_each_buffer_cpu(buffer, cpu) {
473 buffer->buffers[cpu] =
474 rb_allocate_cpu_buffer(buffer, cpu);
475 if (!buffer->buffers[cpu])
476 goto fail_free_buffers;
477 }
478
479 mutex_init(&buffer->mutex);
480
481 return buffer;
482
483 fail_free_buffers:
484 for_each_buffer_cpu(buffer, cpu) {
485 if (buffer->buffers[cpu])
486 rb_free_cpu_buffer(buffer->buffers[cpu]);
487 }
488 kfree(buffer->buffers);
489
490 fail_free_buffer:
491 kfree(buffer);
492 return NULL;
493}
494
495/**
496 * ring_buffer_free - free a ring buffer.
497 * @buffer: the buffer to free.
498 */
499void
500ring_buffer_free(struct ring_buffer *buffer)
501{
502 int cpu;
503
504 for_each_buffer_cpu(buffer, cpu)
505 rb_free_cpu_buffer(buffer->buffers[cpu]);
506
507 kfree(buffer);
508}
509
510static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
511
512static void
513rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
514{
515 struct buffer_page *page;
516 struct list_head *p;
517 unsigned i;
518
519 atomic_inc(&cpu_buffer->record_disabled);
520 synchronize_sched();
521
522 for (i = 0; i < nr_pages; i++) {
3e89c7bb
SR
523 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
524 return;
7a8e76a3
SR
525 p = cpu_buffer->pages.next;
526 page = list_entry(p, struct buffer_page, list);
527 list_del_init(&page->list);
ed56829c 528 free_buffer_page(page);
7a8e76a3 529 }
3e89c7bb
SR
530 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
531 return;
7a8e76a3
SR
532
533 rb_reset_cpu(cpu_buffer);
534
535 rb_check_pages(cpu_buffer);
536
537 atomic_dec(&cpu_buffer->record_disabled);
538
539}
540
541static void
542rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
543 struct list_head *pages, unsigned nr_pages)
544{
545 struct buffer_page *page;
546 struct list_head *p;
547 unsigned i;
548
549 atomic_inc(&cpu_buffer->record_disabled);
550 synchronize_sched();
551
552 for (i = 0; i < nr_pages; i++) {
3e89c7bb
SR
553 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
554 return;
7a8e76a3
SR
555 p = pages->next;
556 page = list_entry(p, struct buffer_page, list);
557 list_del_init(&page->list);
558 list_add_tail(&page->list, &cpu_buffer->pages);
559 }
560 rb_reset_cpu(cpu_buffer);
561
562 rb_check_pages(cpu_buffer);
563
564 atomic_dec(&cpu_buffer->record_disabled);
565}
566
567/**
568 * ring_buffer_resize - resize the ring buffer
569 * @buffer: the buffer to resize.
570 * @size: the new size.
571 *
572 * The tracer is responsible for making sure that the buffer is
573 * not being used while changing the size.
574 * Note: We may be able to change the above requirement by using
575 * RCU synchronizations.
576 *
577 * Minimum size is 2 * BUF_PAGE_SIZE.
578 *
579 * Returns -1 on failure.
580 */
581int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
582{
583 struct ring_buffer_per_cpu *cpu_buffer;
584 unsigned nr_pages, rm_pages, new_pages;
585 struct buffer_page *page, *tmp;
586 unsigned long buffer_size;
587 unsigned long addr;
588 LIST_HEAD(pages);
589 int i, cpu;
590
ee51a1de
IM
591 /*
592 * Always succeed at resizing a non-existent buffer:
593 */
594 if (!buffer)
595 return size;
596
7a8e76a3
SR
597 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
598 size *= BUF_PAGE_SIZE;
599 buffer_size = buffer->pages * BUF_PAGE_SIZE;
600
601 /* we need a minimum of two pages */
602 if (size < BUF_PAGE_SIZE * 2)
603 size = BUF_PAGE_SIZE * 2;
604
605 if (size == buffer_size)
606 return size;
607
608 mutex_lock(&buffer->mutex);
609
610 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
611
612 if (size < buffer_size) {
613
614 /* easy case, just free pages */
3e89c7bb
SR
615 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
616 mutex_unlock(&buffer->mutex);
617 return -1;
618 }
7a8e76a3
SR
619
620 rm_pages = buffer->pages - nr_pages;
621
622 for_each_buffer_cpu(buffer, cpu) {
623 cpu_buffer = buffer->buffers[cpu];
624 rb_remove_pages(cpu_buffer, rm_pages);
625 }
626 goto out;
627 }
628
629 /*
630 * This is a bit more difficult. We only want to add pages
631 * when we can allocate enough for all CPUs. We do this
632 * by allocating all the pages and storing them on a local
633 * link list. If we succeed in our allocation, then we
634 * add these pages to the cpu_buffers. Otherwise we just free
635 * them all and return -ENOMEM;
636 */
3e89c7bb
SR
637 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
638 mutex_unlock(&buffer->mutex);
639 return -1;
640 }
f536aafc 641
7a8e76a3
SR
642 new_pages = nr_pages - buffer->pages;
643
644 for_each_buffer_cpu(buffer, cpu) {
645 for (i = 0; i < new_pages; i++) {
e4c2ce82
SR
646 page = kzalloc_node(ALIGN(sizeof(*page),
647 cache_line_size()),
648 GFP_KERNEL, cpu_to_node(cpu));
649 if (!page)
650 goto free_pages;
651 list_add(&page->list, &pages);
7a8e76a3
SR
652 addr = __get_free_page(GFP_KERNEL);
653 if (!addr)
654 goto free_pages;
e4c2ce82 655 page->page = (void *)addr;
abc9b56d 656 rb_init_page(page->page);
7a8e76a3
SR
657 }
658 }
659
660 for_each_buffer_cpu(buffer, cpu) {
661 cpu_buffer = buffer->buffers[cpu];
662 rb_insert_pages(cpu_buffer, &pages, new_pages);
663 }
664
3e89c7bb
SR
665 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
666 mutex_unlock(&buffer->mutex);
667 return -1;
668 }
7a8e76a3
SR
669
670 out:
671 buffer->pages = nr_pages;
672 mutex_unlock(&buffer->mutex);
673
674 return size;
675
676 free_pages:
677 list_for_each_entry_safe(page, tmp, &pages, list) {
678 list_del_init(&page->list);
ed56829c 679 free_buffer_page(page);
7a8e76a3 680 }
641d2f63 681 mutex_unlock(&buffer->mutex);
7a8e76a3
SR
682 return -ENOMEM;
683}
684
7a8e76a3
SR
685static inline int rb_null_event(struct ring_buffer_event *event)
686{
687 return event->type == RINGBUF_TYPE_PADDING;
688}
689
8789a9e7
SR
690static inline void *
691__rb_data_page_index(struct buffer_data_page *page, unsigned index)
692{
693 return page->data + index;
694}
695
6f807acd 696static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
7a8e76a3 697{
abc9b56d 698 return page->page->data + index;
7a8e76a3
SR
699}
700
701static inline struct ring_buffer_event *
d769041f 702rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 703{
6f807acd
SR
704 return __rb_page_index(cpu_buffer->reader_page,
705 cpu_buffer->reader_page->read);
706}
707
708static inline struct ring_buffer_event *
709rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
710{
711 return __rb_page_index(cpu_buffer->head_page,
712 cpu_buffer->head_page->read);
7a8e76a3
SR
713}
714
715static inline struct ring_buffer_event *
716rb_iter_head_event(struct ring_buffer_iter *iter)
717{
6f807acd 718 return __rb_page_index(iter->head_page, iter->head);
7a8e76a3
SR
719}
720
bf41a158
SR
721static inline unsigned rb_page_write(struct buffer_page *bpage)
722{
723 return local_read(&bpage->write);
724}
725
726static inline unsigned rb_page_commit(struct buffer_page *bpage)
727{
abc9b56d 728 return local_read(&bpage->page->commit);
bf41a158
SR
729}
730
731/* Size is determined by what has been commited */
732static inline unsigned rb_page_size(struct buffer_page *bpage)
733{
734 return rb_page_commit(bpage);
735}
736
737static inline unsigned
738rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
739{
740 return rb_page_commit(cpu_buffer->commit_page);
741}
742
743static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
744{
745 return rb_page_commit(cpu_buffer->head_page);
746}
747
7a8e76a3
SR
748/*
749 * When the tail hits the head and the buffer is in overwrite mode,
750 * the head jumps to the next page and all content on the previous
751 * page is discarded. But before doing so, we update the overrun
752 * variable of the buffer.
753 */
754static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
755{
756 struct ring_buffer_event *event;
757 unsigned long head;
758
759 for (head = 0; head < rb_head_size(cpu_buffer);
760 head += rb_event_length(event)) {
761
6f807acd 762 event = __rb_page_index(cpu_buffer->head_page, head);
3e89c7bb
SR
763 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
764 return;
7a8e76a3
SR
765 /* Only count data entries */
766 if (event->type != RINGBUF_TYPE_DATA)
767 continue;
768 cpu_buffer->overrun++;
769 cpu_buffer->entries--;
770 }
771}
772
773static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
774 struct buffer_page **page)
775{
776 struct list_head *p = (*page)->list.next;
777
778 if (p == &cpu_buffer->pages)
779 p = p->next;
780
781 *page = list_entry(p, struct buffer_page, list);
782}
783
bf41a158
SR
784static inline unsigned
785rb_event_index(struct ring_buffer_event *event)
786{
787 unsigned long addr = (unsigned long)event;
788
789 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
790}
791
792static inline int
793rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
794 struct ring_buffer_event *event)
795{
796 unsigned long addr = (unsigned long)event;
797 unsigned long index;
798
799 index = rb_event_index(event);
800 addr &= PAGE_MASK;
801
802 return cpu_buffer->commit_page->page == (void *)addr &&
803 rb_commit_index(cpu_buffer) == index;
804}
805
7a8e76a3 806static inline void
bf41a158
SR
807rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
808 struct ring_buffer_event *event)
7a8e76a3 809{
bf41a158
SR
810 unsigned long addr = (unsigned long)event;
811 unsigned long index;
812
813 index = rb_event_index(event);
814 addr &= PAGE_MASK;
815
816 while (cpu_buffer->commit_page->page != (void *)addr) {
3e89c7bb
SR
817 if (RB_WARN_ON(cpu_buffer,
818 cpu_buffer->commit_page == cpu_buffer->tail_page))
819 return;
abc9b56d 820 cpu_buffer->commit_page->page->commit =
bf41a158
SR
821 cpu_buffer->commit_page->write;
822 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
abc9b56d
SR
823 cpu_buffer->write_stamp =
824 cpu_buffer->commit_page->page->time_stamp;
bf41a158
SR
825 }
826
827 /* Now set the commit to the event's index */
abc9b56d 828 local_set(&cpu_buffer->commit_page->page->commit, index);
7a8e76a3
SR
829}
830
bf41a158
SR
831static inline void
832rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 833{
bf41a158
SR
834 /*
835 * We only race with interrupts and NMIs on this CPU.
836 * If we own the commit event, then we can commit
837 * all others that interrupted us, since the interruptions
838 * are in stack format (they finish before they come
839 * back to us). This allows us to do a simple loop to
840 * assign the commit to the tail.
841 */
842 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
abc9b56d 843 cpu_buffer->commit_page->page->commit =
bf41a158
SR
844 cpu_buffer->commit_page->write;
845 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
abc9b56d
SR
846 cpu_buffer->write_stamp =
847 cpu_buffer->commit_page->page->time_stamp;
bf41a158
SR
848 /* add barrier to keep gcc from optimizing too much */
849 barrier();
850 }
851 while (rb_commit_index(cpu_buffer) !=
852 rb_page_write(cpu_buffer->commit_page)) {
abc9b56d 853 cpu_buffer->commit_page->page->commit =
bf41a158
SR
854 cpu_buffer->commit_page->write;
855 barrier();
856 }
7a8e76a3
SR
857}
858
d769041f 859static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 860{
abc9b56d 861 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
6f807acd 862 cpu_buffer->reader_page->read = 0;
d769041f
SR
863}
864
865static inline void rb_inc_iter(struct ring_buffer_iter *iter)
866{
867 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
868
869 /*
870 * The iterator could be on the reader page (it starts there).
871 * But the head could have moved, since the reader was
872 * found. Check for this case and assign the iterator
873 * to the head page instead of next.
874 */
875 if (iter->head_page == cpu_buffer->reader_page)
876 iter->head_page = cpu_buffer->head_page;
877 else
878 rb_inc_page(cpu_buffer, &iter->head_page);
879
abc9b56d 880 iter->read_stamp = iter->head_page->page->time_stamp;
7a8e76a3
SR
881 iter->head = 0;
882}
883
884/**
885 * ring_buffer_update_event - update event type and data
886 * @event: the even to update
887 * @type: the type of event
888 * @length: the size of the event field in the ring buffer
889 *
890 * Update the type and data fields of the event. The length
891 * is the actual size that is written to the ring buffer,
892 * and with this, we can determine what to place into the
893 * data field.
894 */
895static inline void
896rb_update_event(struct ring_buffer_event *event,
897 unsigned type, unsigned length)
898{
899 event->type = type;
900
901 switch (type) {
902
903 case RINGBUF_TYPE_PADDING:
904 break;
905
906 case RINGBUF_TYPE_TIME_EXTEND:
907 event->len =
908 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
909 >> RB_ALIGNMENT_SHIFT;
910 break;
911
912 case RINGBUF_TYPE_TIME_STAMP:
913 event->len =
914 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
915 >> RB_ALIGNMENT_SHIFT;
916 break;
917
918 case RINGBUF_TYPE_DATA:
919 length -= RB_EVNT_HDR_SIZE;
920 if (length > RB_MAX_SMALL_DATA) {
921 event->len = 0;
922 event->array[0] = length;
923 } else
924 event->len =
925 (length + (RB_ALIGNMENT-1))
926 >> RB_ALIGNMENT_SHIFT;
927 break;
928 default:
929 BUG();
930 }
931}
932
933static inline unsigned rb_calculate_event_length(unsigned length)
934{
935 struct ring_buffer_event event; /* Used only for sizeof array */
936
937 /* zero length can cause confusions */
938 if (!length)
939 length = 1;
940
941 if (length > RB_MAX_SMALL_DATA)
942 length += sizeof(event.array[0]);
943
944 length += RB_EVNT_HDR_SIZE;
945 length = ALIGN(length, RB_ALIGNMENT);
946
947 return length;
948}
949
950static struct ring_buffer_event *
951__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
952 unsigned type, unsigned long length, u64 *ts)
953{
d769041f 954 struct buffer_page *tail_page, *head_page, *reader_page;
bf41a158 955 unsigned long tail, write;
7a8e76a3
SR
956 struct ring_buffer *buffer = cpu_buffer->buffer;
957 struct ring_buffer_event *event;
bf41a158 958 unsigned long flags;
7a8e76a3
SR
959
960 tail_page = cpu_buffer->tail_page;
bf41a158
SR
961 write = local_add_return(length, &tail_page->write);
962 tail = write - length;
7a8e76a3 963
bf41a158
SR
964 /* See if we shot pass the end of this buffer page */
965 if (write > BUF_PAGE_SIZE) {
7a8e76a3
SR
966 struct buffer_page *next_page = tail_page;
967
3e03fb7f
SR
968 local_irq_save(flags);
969 __raw_spin_lock(&cpu_buffer->lock);
bf41a158 970
7a8e76a3
SR
971 rb_inc_page(cpu_buffer, &next_page);
972
d769041f
SR
973 head_page = cpu_buffer->head_page;
974 reader_page = cpu_buffer->reader_page;
975
976 /* we grabbed the lock before incrementing */
3e89c7bb
SR
977 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
978 goto out_unlock;
bf41a158
SR
979
980 /*
981 * If for some reason, we had an interrupt storm that made
982 * it all the way around the buffer, bail, and warn
983 * about it.
984 */
985 if (unlikely(next_page == cpu_buffer->commit_page)) {
986 WARN_ON_ONCE(1);
987 goto out_unlock;
988 }
d769041f 989
7a8e76a3 990 if (next_page == head_page) {
d769041f 991 if (!(buffer->flags & RB_FL_OVERWRITE)) {
bf41a158
SR
992 /* reset write */
993 if (tail <= BUF_PAGE_SIZE)
994 local_set(&tail_page->write, tail);
995 goto out_unlock;
d769041f 996 }
7a8e76a3 997
bf41a158
SR
998 /* tail_page has not moved yet? */
999 if (tail_page == cpu_buffer->tail_page) {
1000 /* count overflows */
1001 rb_update_overflow(cpu_buffer);
1002
1003 rb_inc_page(cpu_buffer, &head_page);
1004 cpu_buffer->head_page = head_page;
1005 cpu_buffer->head_page->read = 0;
1006 }
1007 }
7a8e76a3 1008
bf41a158
SR
1009 /*
1010 * If the tail page is still the same as what we think
1011 * it is, then it is up to us to update the tail
1012 * pointer.
1013 */
1014 if (tail_page == cpu_buffer->tail_page) {
1015 local_set(&next_page->write, 0);
abc9b56d 1016 local_set(&next_page->page->commit, 0);
bf41a158
SR
1017 cpu_buffer->tail_page = next_page;
1018
1019 /* reread the time stamp */
1020 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
abc9b56d 1021 cpu_buffer->tail_page->page->time_stamp = *ts;
7a8e76a3
SR
1022 }
1023
bf41a158
SR
1024 /*
1025 * The actual tail page has moved forward.
1026 */
1027 if (tail < BUF_PAGE_SIZE) {
1028 /* Mark the rest of the page with padding */
6f807acd 1029 event = __rb_page_index(tail_page, tail);
7a8e76a3
SR
1030 event->type = RINGBUF_TYPE_PADDING;
1031 }
1032
bf41a158
SR
1033 if (tail <= BUF_PAGE_SIZE)
1034 /* Set the write back to the previous setting */
1035 local_set(&tail_page->write, tail);
1036
1037 /*
1038 * If this was a commit entry that failed,
1039 * increment that too
1040 */
1041 if (tail_page == cpu_buffer->commit_page &&
1042 tail == rb_commit_index(cpu_buffer)) {
1043 rb_set_commit_to_write(cpu_buffer);
1044 }
1045
3e03fb7f
SR
1046 __raw_spin_unlock(&cpu_buffer->lock);
1047 local_irq_restore(flags);
bf41a158
SR
1048
1049 /* fail and let the caller try again */
1050 return ERR_PTR(-EAGAIN);
7a8e76a3
SR
1051 }
1052
bf41a158
SR
1053 /* We reserved something on the buffer */
1054
3e89c7bb
SR
1055 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1056 return NULL;
7a8e76a3 1057
6f807acd 1058 event = __rb_page_index(tail_page, tail);
7a8e76a3
SR
1059 rb_update_event(event, type, length);
1060
bf41a158
SR
1061 /*
1062 * If this is a commit and the tail is zero, then update
1063 * this page's time stamp.
1064 */
1065 if (!tail && rb_is_commit(cpu_buffer, event))
abc9b56d 1066 cpu_buffer->commit_page->page->time_stamp = *ts;
bf41a158 1067
7a8e76a3 1068 return event;
bf41a158
SR
1069
1070 out_unlock:
3e03fb7f
SR
1071 __raw_spin_unlock(&cpu_buffer->lock);
1072 local_irq_restore(flags);
bf41a158 1073 return NULL;
7a8e76a3
SR
1074}
1075
1076static int
1077rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1078 u64 *ts, u64 *delta)
1079{
1080 struct ring_buffer_event *event;
1081 static int once;
bf41a158 1082 int ret;
7a8e76a3
SR
1083
1084 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1085 printk(KERN_WARNING "Delta way too big! %llu"
1086 " ts=%llu write stamp = %llu\n",
e2862c94
SR
1087 (unsigned long long)*delta,
1088 (unsigned long long)*ts,
1089 (unsigned long long)cpu_buffer->write_stamp);
7a8e76a3
SR
1090 WARN_ON(1);
1091 }
1092
1093 /*
1094 * The delta is too big, we to add a
1095 * new timestamp.
1096 */
1097 event = __rb_reserve_next(cpu_buffer,
1098 RINGBUF_TYPE_TIME_EXTEND,
1099 RB_LEN_TIME_EXTEND,
1100 ts);
1101 if (!event)
bf41a158 1102 return -EBUSY;
7a8e76a3 1103
bf41a158
SR
1104 if (PTR_ERR(event) == -EAGAIN)
1105 return -EAGAIN;
1106
1107 /* Only a commited time event can update the write stamp */
1108 if (rb_is_commit(cpu_buffer, event)) {
1109 /*
1110 * If this is the first on the page, then we need to
1111 * update the page itself, and just put in a zero.
1112 */
1113 if (rb_event_index(event)) {
1114 event->time_delta = *delta & TS_MASK;
1115 event->array[0] = *delta >> TS_SHIFT;
1116 } else {
abc9b56d 1117 cpu_buffer->commit_page->page->time_stamp = *ts;
bf41a158
SR
1118 event->time_delta = 0;
1119 event->array[0] = 0;
1120 }
7a8e76a3 1121 cpu_buffer->write_stamp = *ts;
bf41a158
SR
1122 /* let the caller know this was the commit */
1123 ret = 1;
1124 } else {
1125 /* Darn, this is just wasted space */
1126 event->time_delta = 0;
1127 event->array[0] = 0;
1128 ret = 0;
7a8e76a3
SR
1129 }
1130
bf41a158
SR
1131 *delta = 0;
1132
1133 return ret;
7a8e76a3
SR
1134}
1135
1136static struct ring_buffer_event *
1137rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1138 unsigned type, unsigned long length)
1139{
1140 struct ring_buffer_event *event;
1141 u64 ts, delta;
bf41a158 1142 int commit = 0;
818e3dd3 1143 int nr_loops = 0;
7a8e76a3 1144
bf41a158 1145 again:
818e3dd3
SR
1146 /*
1147 * We allow for interrupts to reenter here and do a trace.
1148 * If one does, it will cause this original code to loop
1149 * back here. Even with heavy interrupts happening, this
1150 * should only happen a few times in a row. If this happens
1151 * 1000 times in a row, there must be either an interrupt
1152 * storm or we have something buggy.
1153 * Bail!
1154 */
3e89c7bb 1155 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
818e3dd3 1156 return NULL;
818e3dd3 1157
7a8e76a3
SR
1158 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1159
bf41a158
SR
1160 /*
1161 * Only the first commit can update the timestamp.
1162 * Yes there is a race here. If an interrupt comes in
1163 * just after the conditional and it traces too, then it
1164 * will also check the deltas. More than one timestamp may
1165 * also be made. But only the entry that did the actual
1166 * commit will be something other than zero.
1167 */
1168 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1169 rb_page_write(cpu_buffer->tail_page) ==
1170 rb_commit_index(cpu_buffer)) {
1171
7a8e76a3
SR
1172 delta = ts - cpu_buffer->write_stamp;
1173
bf41a158
SR
1174 /* make sure this delta is calculated here */
1175 barrier();
1176
1177 /* Did the write stamp get updated already? */
1178 if (unlikely(ts < cpu_buffer->write_stamp))
4143c5cb 1179 delta = 0;
bf41a158 1180
7a8e76a3 1181 if (test_time_stamp(delta)) {
7a8e76a3 1182
bf41a158
SR
1183 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1184
1185 if (commit == -EBUSY)
7a8e76a3 1186 return NULL;
bf41a158
SR
1187
1188 if (commit == -EAGAIN)
1189 goto again;
1190
1191 RB_WARN_ON(cpu_buffer, commit < 0);
7a8e76a3 1192 }
bf41a158
SR
1193 } else
1194 /* Non commits have zero deltas */
7a8e76a3 1195 delta = 0;
7a8e76a3
SR
1196
1197 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
bf41a158
SR
1198 if (PTR_ERR(event) == -EAGAIN)
1199 goto again;
1200
1201 if (!event) {
1202 if (unlikely(commit))
1203 /*
1204 * Ouch! We needed a timestamp and it was commited. But
1205 * we didn't get our event reserved.
1206 */
1207 rb_set_commit_to_write(cpu_buffer);
7a8e76a3 1208 return NULL;
bf41a158 1209 }
7a8e76a3 1210
bf41a158
SR
1211 /*
1212 * If the timestamp was commited, make the commit our entry
1213 * now so that we will update it when needed.
1214 */
1215 if (commit)
1216 rb_set_commit_event(cpu_buffer, event);
1217 else if (!rb_is_commit(cpu_buffer, event))
7a8e76a3
SR
1218 delta = 0;
1219
1220 event->time_delta = delta;
1221
1222 return event;
1223}
1224
bf41a158
SR
1225static DEFINE_PER_CPU(int, rb_need_resched);
1226
7a8e76a3
SR
1227/**
1228 * ring_buffer_lock_reserve - reserve a part of the buffer
1229 * @buffer: the ring buffer to reserve from
1230 * @length: the length of the data to reserve (excluding event header)
1231 * @flags: a pointer to save the interrupt flags
1232 *
1233 * Returns a reseverd event on the ring buffer to copy directly to.
1234 * The user of this interface will need to get the body to write into
1235 * and can use the ring_buffer_event_data() interface.
1236 *
1237 * The length is the length of the data needed, not the event length
1238 * which also includes the event header.
1239 *
1240 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1241 * If NULL is returned, then nothing has been allocated or locked.
1242 */
1243struct ring_buffer_event *
1244ring_buffer_lock_reserve(struct ring_buffer *buffer,
1245 unsigned long length,
1246 unsigned long *flags)
1247{
1248 struct ring_buffer_per_cpu *cpu_buffer;
1249 struct ring_buffer_event *event;
bf41a158 1250 int cpu, resched;
7a8e76a3 1251
033601a3 1252 if (ring_buffer_flags != RB_BUFFERS_ON)
a3583244
SR
1253 return NULL;
1254
7a8e76a3
SR
1255 if (atomic_read(&buffer->record_disabled))
1256 return NULL;
1257
bf41a158 1258 /* If we are tracing schedule, we don't want to recurse */
182e9f5f 1259 resched = ftrace_preempt_disable();
bf41a158 1260
7a8e76a3
SR
1261 cpu = raw_smp_processor_id();
1262
1263 if (!cpu_isset(cpu, buffer->cpumask))
d769041f 1264 goto out;
7a8e76a3
SR
1265
1266 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
1267
1268 if (atomic_read(&cpu_buffer->record_disabled))
d769041f 1269 goto out;
7a8e76a3
SR
1270
1271 length = rb_calculate_event_length(length);
1272 if (length > BUF_PAGE_SIZE)
bf41a158 1273 goto out;
7a8e76a3
SR
1274
1275 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1276 if (!event)
d769041f 1277 goto out;
7a8e76a3 1278
bf41a158
SR
1279 /*
1280 * Need to store resched state on this cpu.
1281 * Only the first needs to.
1282 */
1283
1284 if (preempt_count() == 1)
1285 per_cpu(rb_need_resched, cpu) = resched;
1286
7a8e76a3
SR
1287 return event;
1288
d769041f 1289 out:
182e9f5f 1290 ftrace_preempt_enable(resched);
7a8e76a3
SR
1291 return NULL;
1292}
1293
1294static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1295 struct ring_buffer_event *event)
1296{
7a8e76a3 1297 cpu_buffer->entries++;
bf41a158
SR
1298
1299 /* Only process further if we own the commit */
1300 if (!rb_is_commit(cpu_buffer, event))
1301 return;
1302
1303 cpu_buffer->write_stamp += event->time_delta;
1304
1305 rb_set_commit_to_write(cpu_buffer);
7a8e76a3
SR
1306}
1307
1308/**
1309 * ring_buffer_unlock_commit - commit a reserved
1310 * @buffer: The buffer to commit to
1311 * @event: The event pointer to commit.
1312 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1313 *
1314 * This commits the data to the ring buffer, and releases any locks held.
1315 *
1316 * Must be paired with ring_buffer_lock_reserve.
1317 */
1318int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1319 struct ring_buffer_event *event,
1320 unsigned long flags)
1321{
1322 struct ring_buffer_per_cpu *cpu_buffer;
1323 int cpu = raw_smp_processor_id();
1324
1325 cpu_buffer = buffer->buffers[cpu];
1326
7a8e76a3
SR
1327 rb_commit(cpu_buffer, event);
1328
bf41a158
SR
1329 /*
1330 * Only the last preempt count needs to restore preemption.
1331 */
182e9f5f
SR
1332 if (preempt_count() == 1)
1333 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1334 else
bf41a158 1335 preempt_enable_no_resched_notrace();
7a8e76a3
SR
1336
1337 return 0;
1338}
1339
1340/**
1341 * ring_buffer_write - write data to the buffer without reserving
1342 * @buffer: The ring buffer to write to.
1343 * @length: The length of the data being written (excluding the event header)
1344 * @data: The data to write to the buffer.
1345 *
1346 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1347 * one function. If you already have the data to write to the buffer, it
1348 * may be easier to simply call this function.
1349 *
1350 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1351 * and not the length of the event which would hold the header.
1352 */
1353int ring_buffer_write(struct ring_buffer *buffer,
1354 unsigned long length,
1355 void *data)
1356{
1357 struct ring_buffer_per_cpu *cpu_buffer;
1358 struct ring_buffer_event *event;
bf41a158 1359 unsigned long event_length;
7a8e76a3
SR
1360 void *body;
1361 int ret = -EBUSY;
bf41a158 1362 int cpu, resched;
7a8e76a3 1363
033601a3 1364 if (ring_buffer_flags != RB_BUFFERS_ON)
a3583244
SR
1365 return -EBUSY;
1366
7a8e76a3
SR
1367 if (atomic_read(&buffer->record_disabled))
1368 return -EBUSY;
1369
182e9f5f 1370 resched = ftrace_preempt_disable();
bf41a158 1371
7a8e76a3
SR
1372 cpu = raw_smp_processor_id();
1373
1374 if (!cpu_isset(cpu, buffer->cpumask))
d769041f 1375 goto out;
7a8e76a3
SR
1376
1377 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
1378
1379 if (atomic_read(&cpu_buffer->record_disabled))
1380 goto out;
1381
1382 event_length = rb_calculate_event_length(length);
1383 event = rb_reserve_next_event(cpu_buffer,
1384 RINGBUF_TYPE_DATA, event_length);
1385 if (!event)
1386 goto out;
1387
1388 body = rb_event_data(event);
1389
1390 memcpy(body, data, length);
1391
1392 rb_commit(cpu_buffer, event);
1393
1394 ret = 0;
1395 out:
182e9f5f 1396 ftrace_preempt_enable(resched);
7a8e76a3
SR
1397
1398 return ret;
1399}
1400
bf41a158
SR
1401static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1402{
1403 struct buffer_page *reader = cpu_buffer->reader_page;
1404 struct buffer_page *head = cpu_buffer->head_page;
1405 struct buffer_page *commit = cpu_buffer->commit_page;
1406
1407 return reader->read == rb_page_commit(reader) &&
1408 (commit == reader ||
1409 (commit == head &&
1410 head->read == rb_page_commit(commit)));
1411}
1412
7a8e76a3
SR
1413/**
1414 * ring_buffer_record_disable - stop all writes into the buffer
1415 * @buffer: The ring buffer to stop writes to.
1416 *
1417 * This prevents all writes to the buffer. Any attempt to write
1418 * to the buffer after this will fail and return NULL.
1419 *
1420 * The caller should call synchronize_sched() after this.
1421 */
1422void ring_buffer_record_disable(struct ring_buffer *buffer)
1423{
1424 atomic_inc(&buffer->record_disabled);
1425}
1426
1427/**
1428 * ring_buffer_record_enable - enable writes to the buffer
1429 * @buffer: The ring buffer to enable writes
1430 *
1431 * Note, multiple disables will need the same number of enables
1432 * to truely enable the writing (much like preempt_disable).
1433 */
1434void ring_buffer_record_enable(struct ring_buffer *buffer)
1435{
1436 atomic_dec(&buffer->record_disabled);
1437}
1438
1439/**
1440 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1441 * @buffer: The ring buffer to stop writes to.
1442 * @cpu: The CPU buffer to stop
1443 *
1444 * This prevents all writes to the buffer. Any attempt to write
1445 * to the buffer after this will fail and return NULL.
1446 *
1447 * The caller should call synchronize_sched() after this.
1448 */
1449void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1450{
1451 struct ring_buffer_per_cpu *cpu_buffer;
1452
1453 if (!cpu_isset(cpu, buffer->cpumask))
1454 return;
1455
1456 cpu_buffer = buffer->buffers[cpu];
1457 atomic_inc(&cpu_buffer->record_disabled);
1458}
1459
1460/**
1461 * ring_buffer_record_enable_cpu - enable writes to the buffer
1462 * @buffer: The ring buffer to enable writes
1463 * @cpu: The CPU to enable.
1464 *
1465 * Note, multiple disables will need the same number of enables
1466 * to truely enable the writing (much like preempt_disable).
1467 */
1468void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1469{
1470 struct ring_buffer_per_cpu *cpu_buffer;
1471
1472 if (!cpu_isset(cpu, buffer->cpumask))
1473 return;
1474
1475 cpu_buffer = buffer->buffers[cpu];
1476 atomic_dec(&cpu_buffer->record_disabled);
1477}
1478
1479/**
1480 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1481 * @buffer: The ring buffer
1482 * @cpu: The per CPU buffer to get the entries from.
1483 */
1484unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1485{
1486 struct ring_buffer_per_cpu *cpu_buffer;
1487
1488 if (!cpu_isset(cpu, buffer->cpumask))
1489 return 0;
1490
1491 cpu_buffer = buffer->buffers[cpu];
1492 return cpu_buffer->entries;
1493}
1494
1495/**
1496 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1497 * @buffer: The ring buffer
1498 * @cpu: The per CPU buffer to get the number of overruns from
1499 */
1500unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1501{
1502 struct ring_buffer_per_cpu *cpu_buffer;
1503
1504 if (!cpu_isset(cpu, buffer->cpumask))
1505 return 0;
1506
1507 cpu_buffer = buffer->buffers[cpu];
1508 return cpu_buffer->overrun;
1509}
1510
1511/**
1512 * ring_buffer_entries - get the number of entries in a buffer
1513 * @buffer: The ring buffer
1514 *
1515 * Returns the total number of entries in the ring buffer
1516 * (all CPU entries)
1517 */
1518unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1519{
1520 struct ring_buffer_per_cpu *cpu_buffer;
1521 unsigned long entries = 0;
1522 int cpu;
1523
1524 /* if you care about this being correct, lock the buffer */
1525 for_each_buffer_cpu(buffer, cpu) {
1526 cpu_buffer = buffer->buffers[cpu];
1527 entries += cpu_buffer->entries;
1528 }
1529
1530 return entries;
1531}
1532
1533/**
1534 * ring_buffer_overrun_cpu - get the number of overruns in buffer
1535 * @buffer: The ring buffer
1536 *
1537 * Returns the total number of overruns in the ring buffer
1538 * (all CPU entries)
1539 */
1540unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1541{
1542 struct ring_buffer_per_cpu *cpu_buffer;
1543 unsigned long overruns = 0;
1544 int cpu;
1545
1546 /* if you care about this being correct, lock the buffer */
1547 for_each_buffer_cpu(buffer, cpu) {
1548 cpu_buffer = buffer->buffers[cpu];
1549 overruns += cpu_buffer->overrun;
1550 }
1551
1552 return overruns;
1553}
1554
642edba5 1555static void rb_iter_reset(struct ring_buffer_iter *iter)
7a8e76a3
SR
1556{
1557 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1558
d769041f
SR
1559 /* Iterator usage is expected to have record disabled */
1560 if (list_empty(&cpu_buffer->reader_page->list)) {
1561 iter->head_page = cpu_buffer->head_page;
6f807acd 1562 iter->head = cpu_buffer->head_page->read;
d769041f
SR
1563 } else {
1564 iter->head_page = cpu_buffer->reader_page;
6f807acd 1565 iter->head = cpu_buffer->reader_page->read;
d769041f
SR
1566 }
1567 if (iter->head)
1568 iter->read_stamp = cpu_buffer->read_stamp;
1569 else
abc9b56d 1570 iter->read_stamp = iter->head_page->page->time_stamp;
642edba5 1571}
f83c9d0f 1572
642edba5
SR
1573/**
1574 * ring_buffer_iter_reset - reset an iterator
1575 * @iter: The iterator to reset
1576 *
1577 * Resets the iterator, so that it will start from the beginning
1578 * again.
1579 */
1580void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1581{
1582 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1583 unsigned long flags;
1584
1585 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1586 rb_iter_reset(iter);
f83c9d0f 1587 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3
SR
1588}
1589
1590/**
1591 * ring_buffer_iter_empty - check if an iterator has no more to read
1592 * @iter: The iterator to check
1593 */
1594int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1595{
1596 struct ring_buffer_per_cpu *cpu_buffer;
1597
1598 cpu_buffer = iter->cpu_buffer;
1599
bf41a158
SR
1600 return iter->head_page == cpu_buffer->commit_page &&
1601 iter->head == rb_commit_index(cpu_buffer);
7a8e76a3
SR
1602}
1603
1604static void
1605rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1606 struct ring_buffer_event *event)
1607{
1608 u64 delta;
1609
1610 switch (event->type) {
1611 case RINGBUF_TYPE_PADDING:
1612 return;
1613
1614 case RINGBUF_TYPE_TIME_EXTEND:
1615 delta = event->array[0];
1616 delta <<= TS_SHIFT;
1617 delta += event->time_delta;
1618 cpu_buffer->read_stamp += delta;
1619 return;
1620
1621 case RINGBUF_TYPE_TIME_STAMP:
1622 /* FIXME: not implemented */
1623 return;
1624
1625 case RINGBUF_TYPE_DATA:
1626 cpu_buffer->read_stamp += event->time_delta;
1627 return;
1628
1629 default:
1630 BUG();
1631 }
1632 return;
1633}
1634
1635static void
1636rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1637 struct ring_buffer_event *event)
1638{
1639 u64 delta;
1640
1641 switch (event->type) {
1642 case RINGBUF_TYPE_PADDING:
1643 return;
1644
1645 case RINGBUF_TYPE_TIME_EXTEND:
1646 delta = event->array[0];
1647 delta <<= TS_SHIFT;
1648 delta += event->time_delta;
1649 iter->read_stamp += delta;
1650 return;
1651
1652 case RINGBUF_TYPE_TIME_STAMP:
1653 /* FIXME: not implemented */
1654 return;
1655
1656 case RINGBUF_TYPE_DATA:
1657 iter->read_stamp += event->time_delta;
1658 return;
1659
1660 default:
1661 BUG();
1662 }
1663 return;
1664}
1665
d769041f
SR
1666static struct buffer_page *
1667rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1668{
d769041f
SR
1669 struct buffer_page *reader = NULL;
1670 unsigned long flags;
818e3dd3 1671 int nr_loops = 0;
d769041f 1672
3e03fb7f
SR
1673 local_irq_save(flags);
1674 __raw_spin_lock(&cpu_buffer->lock);
d769041f
SR
1675
1676 again:
818e3dd3
SR
1677 /*
1678 * This should normally only loop twice. But because the
1679 * start of the reader inserts an empty page, it causes
1680 * a case where we will loop three times. There should be no
1681 * reason to loop four times (that I know of).
1682 */
3e89c7bb 1683 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
818e3dd3
SR
1684 reader = NULL;
1685 goto out;
1686 }
1687
d769041f
SR
1688 reader = cpu_buffer->reader_page;
1689
1690 /* If there's more to read, return this page */
bf41a158 1691 if (cpu_buffer->reader_page->read < rb_page_size(reader))
d769041f
SR
1692 goto out;
1693
1694 /* Never should we have an index greater than the size */
3e89c7bb
SR
1695 if (RB_WARN_ON(cpu_buffer,
1696 cpu_buffer->reader_page->read > rb_page_size(reader)))
1697 goto out;
d769041f
SR
1698
1699 /* check if we caught up to the tail */
1700 reader = NULL;
bf41a158 1701 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
d769041f 1702 goto out;
7a8e76a3
SR
1703
1704 /*
d769041f
SR
1705 * Splice the empty reader page into the list around the head.
1706 * Reset the reader page to size zero.
7a8e76a3 1707 */
7a8e76a3 1708
d769041f
SR
1709 reader = cpu_buffer->head_page;
1710 cpu_buffer->reader_page->list.next = reader->list.next;
1711 cpu_buffer->reader_page->list.prev = reader->list.prev;
bf41a158
SR
1712
1713 local_set(&cpu_buffer->reader_page->write, 0);
abc9b56d 1714 local_set(&cpu_buffer->reader_page->page->commit, 0);
7a8e76a3 1715
d769041f
SR
1716 /* Make the reader page now replace the head */
1717 reader->list.prev->next = &cpu_buffer->reader_page->list;
1718 reader->list.next->prev = &cpu_buffer->reader_page->list;
7a8e76a3
SR
1719
1720 /*
d769041f
SR
1721 * If the tail is on the reader, then we must set the head
1722 * to the inserted page, otherwise we set it one before.
7a8e76a3 1723 */
d769041f 1724 cpu_buffer->head_page = cpu_buffer->reader_page;
7a8e76a3 1725
bf41a158 1726 if (cpu_buffer->commit_page != reader)
d769041f
SR
1727 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1728
1729 /* Finally update the reader page to the new head */
1730 cpu_buffer->reader_page = reader;
1731 rb_reset_reader_page(cpu_buffer);
1732
1733 goto again;
1734
1735 out:
3e03fb7f
SR
1736 __raw_spin_unlock(&cpu_buffer->lock);
1737 local_irq_restore(flags);
d769041f
SR
1738
1739 return reader;
1740}
1741
1742static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1743{
1744 struct ring_buffer_event *event;
1745 struct buffer_page *reader;
1746 unsigned length;
1747
1748 reader = rb_get_reader_page(cpu_buffer);
7a8e76a3 1749
d769041f 1750 /* This function should not be called when buffer is empty */
3e89c7bb
SR
1751 if (RB_WARN_ON(cpu_buffer, !reader))
1752 return;
7a8e76a3 1753
d769041f
SR
1754 event = rb_reader_event(cpu_buffer);
1755
1756 if (event->type == RINGBUF_TYPE_DATA)
1757 cpu_buffer->entries--;
1758
1759 rb_update_read_stamp(cpu_buffer, event);
1760
1761 length = rb_event_length(event);
6f807acd 1762 cpu_buffer->reader_page->read += length;
7a8e76a3
SR
1763}
1764
1765static void rb_advance_iter(struct ring_buffer_iter *iter)
1766{
1767 struct ring_buffer *buffer;
1768 struct ring_buffer_per_cpu *cpu_buffer;
1769 struct ring_buffer_event *event;
1770 unsigned length;
1771
1772 cpu_buffer = iter->cpu_buffer;
1773 buffer = cpu_buffer->buffer;
1774
1775 /*
1776 * Check if we are at the end of the buffer.
1777 */
bf41a158 1778 if (iter->head >= rb_page_size(iter->head_page)) {
3e89c7bb
SR
1779 if (RB_WARN_ON(buffer,
1780 iter->head_page == cpu_buffer->commit_page))
1781 return;
d769041f 1782 rb_inc_iter(iter);
7a8e76a3
SR
1783 return;
1784 }
1785
1786 event = rb_iter_head_event(iter);
1787
1788 length = rb_event_length(event);
1789
1790 /*
1791 * This should not be called to advance the header if we are
1792 * at the tail of the buffer.
1793 */
3e89c7bb 1794 if (RB_WARN_ON(cpu_buffer,
f536aafc 1795 (iter->head_page == cpu_buffer->commit_page) &&
3e89c7bb
SR
1796 (iter->head + length > rb_commit_index(cpu_buffer))))
1797 return;
7a8e76a3
SR
1798
1799 rb_update_iter_read_stamp(iter, event);
1800
1801 iter->head += length;
1802
1803 /* check for end of page padding */
bf41a158
SR
1804 if ((iter->head >= rb_page_size(iter->head_page)) &&
1805 (iter->head_page != cpu_buffer->commit_page))
7a8e76a3
SR
1806 rb_advance_iter(iter);
1807}
1808
f83c9d0f
SR
1809static struct ring_buffer_event *
1810rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
7a8e76a3
SR
1811{
1812 struct ring_buffer_per_cpu *cpu_buffer;
1813 struct ring_buffer_event *event;
d769041f 1814 struct buffer_page *reader;
818e3dd3 1815 int nr_loops = 0;
7a8e76a3
SR
1816
1817 if (!cpu_isset(cpu, buffer->cpumask))
1818 return NULL;
1819
1820 cpu_buffer = buffer->buffers[cpu];
1821
1822 again:
818e3dd3
SR
1823 /*
1824 * We repeat when a timestamp is encountered. It is possible
1825 * to get multiple timestamps from an interrupt entering just
1826 * as one timestamp is about to be written. The max times
1827 * that this can happen is the number of nested interrupts we
1828 * can have. Nesting 10 deep of interrupts is clearly
1829 * an anomaly.
1830 */
3e89c7bb 1831 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
818e3dd3 1832 return NULL;
818e3dd3 1833
d769041f
SR
1834 reader = rb_get_reader_page(cpu_buffer);
1835 if (!reader)
7a8e76a3
SR
1836 return NULL;
1837
d769041f 1838 event = rb_reader_event(cpu_buffer);
7a8e76a3
SR
1839
1840 switch (event->type) {
1841 case RINGBUF_TYPE_PADDING:
bf41a158 1842 RB_WARN_ON(cpu_buffer, 1);
d769041f
SR
1843 rb_advance_reader(cpu_buffer);
1844 return NULL;
7a8e76a3
SR
1845
1846 case RINGBUF_TYPE_TIME_EXTEND:
1847 /* Internal data, OK to advance */
d769041f 1848 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
1849 goto again;
1850
1851 case RINGBUF_TYPE_TIME_STAMP:
1852 /* FIXME: not implemented */
d769041f 1853 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
1854 goto again;
1855
1856 case RINGBUF_TYPE_DATA:
1857 if (ts) {
1858 *ts = cpu_buffer->read_stamp + event->time_delta;
1859 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1860 }
1861 return event;
1862
1863 default:
1864 BUG();
1865 }
1866
1867 return NULL;
1868}
1869
f83c9d0f
SR
1870static struct ring_buffer_event *
1871rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
7a8e76a3
SR
1872{
1873 struct ring_buffer *buffer;
1874 struct ring_buffer_per_cpu *cpu_buffer;
1875 struct ring_buffer_event *event;
818e3dd3 1876 int nr_loops = 0;
7a8e76a3
SR
1877
1878 if (ring_buffer_iter_empty(iter))
1879 return NULL;
1880
1881 cpu_buffer = iter->cpu_buffer;
1882 buffer = cpu_buffer->buffer;
1883
1884 again:
818e3dd3
SR
1885 /*
1886 * We repeat when a timestamp is encountered. It is possible
1887 * to get multiple timestamps from an interrupt entering just
1888 * as one timestamp is about to be written. The max times
1889 * that this can happen is the number of nested interrupts we
1890 * can have. Nesting 10 deep of interrupts is clearly
1891 * an anomaly.
1892 */
3e89c7bb 1893 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
818e3dd3 1894 return NULL;
818e3dd3 1895
7a8e76a3
SR
1896 if (rb_per_cpu_empty(cpu_buffer))
1897 return NULL;
1898
1899 event = rb_iter_head_event(iter);
1900
1901 switch (event->type) {
1902 case RINGBUF_TYPE_PADDING:
d769041f 1903 rb_inc_iter(iter);
7a8e76a3
SR
1904 goto again;
1905
1906 case RINGBUF_TYPE_TIME_EXTEND:
1907 /* Internal data, OK to advance */
1908 rb_advance_iter(iter);
1909 goto again;
1910
1911 case RINGBUF_TYPE_TIME_STAMP:
1912 /* FIXME: not implemented */
1913 rb_advance_iter(iter);
1914 goto again;
1915
1916 case RINGBUF_TYPE_DATA:
1917 if (ts) {
1918 *ts = iter->read_stamp + event->time_delta;
1919 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1920 }
1921 return event;
1922
1923 default:
1924 BUG();
1925 }
1926
1927 return NULL;
1928}
1929
1930/**
1931 * ring_buffer_peek - peek at the next event to be read
1932 * @buffer: The ring buffer to read
 1933 * @cpu: The CPU to peek at
1934 * @ts: The timestamp counter of this event.
1935 *
1936 * This will return the event that will be read next, but does
1937 * not consume the data.
1938 */
1939struct ring_buffer_event *
1940ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1941{
1942 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1943 struct ring_buffer_event *event;
1944 unsigned long flags;
1945
1946 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1947 event = rb_buffer_peek(buffer, cpu, ts);
1948 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1949
1950 return event;
1951}
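/*
 * Example: a minimal sketch of a non-consuming peek, assuming the caller
 * already has an allocated ring buffer ("my_buffer", hypothetical) and a
 * CPU number of interest ("cpu"):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(my_buffer, cpu, &ts);
 *	if (event)
 *		pr_debug("next event: %u bytes at %llu\n",
 *			 ring_buffer_event_length(event),
 *			 (unsigned long long)ts);
 *
 * Peeking never advances the reader; only ring_buffer_consume() moves
 * past the event.
 */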
1952
1953/**
1954 * ring_buffer_iter_peek - peek at the next event to be read
1955 * @iter: The ring buffer iterator
1956 * @ts: The timestamp counter of this event.
1957 *
1958 * This will return the event that will be read next, but does
1959 * not increment the iterator.
1960 */
1961struct ring_buffer_event *
1962ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1963{
1964 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1965 struct ring_buffer_event *event;
1966 unsigned long flags;
1967
1968 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1969 event = rb_iter_peek(iter, ts);
1970 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1971
1972 return event;
1973}
1974
1975/**
1976 * ring_buffer_consume - return an event and consume it
1977 * @buffer: The ring buffer to get the next event from
1978 *
 1979 * Returns the next event in the ring buffer and consumes it, so
 1980 * sequential reads return successive events and will eventually
 1981 * empty the ring buffer if the producer is slower than the consumer.
1982 */
1983struct ring_buffer_event *
1984ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1985{
f83c9d0f 1986 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
7a8e76a3 1987 struct ring_buffer_event *event;
f83c9d0f 1988 unsigned long flags;
1989
1990 if (!cpu_isset(cpu, buffer->cpumask))
1991 return NULL;
1992
1993 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1994
1995 event = rb_buffer_peek(buffer, cpu, ts);
7a8e76a3 1996 if (!event)
f83c9d0f 1997 goto out;
7a8e76a3 1998
d769041f 1999 rb_advance_reader(cpu_buffer);
7a8e76a3 2000
2001 out:
2002 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2003
2004 return event;
2005}
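/*
 * Example: a minimal sketch of draining one CPU with consuming reads,
 * assuming "my_buffer" and "cpu" come from the caller and process_entry()
 * is a hypothetical consumer of the payload:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(my_buffer, cpu, &ts)))
 *		process_entry(ring_buffer_event_data(event), ts);
 *
 * Every successful call advances the reader, so the loop ends once the
 * consumer has caught up with the writer.
 */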
2006
2007/**
2008 * ring_buffer_read_start - start a non consuming read of the buffer
2009 * @buffer: The ring buffer to read from
2010 * @cpu: The cpu buffer to iterate over
2011 *
2012 * This starts up an iteration through the buffer. It also disables
2013 * the recording to the buffer until the reading is finished.
2014 * This prevents the reading from being corrupted. This is not
2015 * a consuming read, so a producer is not expected.
2016 *
 2017 * Must be paired with ring_buffer_read_finish.
2018 */
2019struct ring_buffer_iter *
2020ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2021{
2022 struct ring_buffer_per_cpu *cpu_buffer;
2023 struct ring_buffer_iter *iter;
d769041f 2024 unsigned long flags;
2025
2026 if (!cpu_isset(cpu, buffer->cpumask))
2027 return NULL;
2028
2029 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2030 if (!iter)
2031 return NULL;
2032
2033 cpu_buffer = buffer->buffers[cpu];
2034
2035 iter->cpu_buffer = cpu_buffer;
2036
2037 atomic_inc(&cpu_buffer->record_disabled);
2038 synchronize_sched();
2039
f83c9d0f 2040 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3e03fb7f 2041 __raw_spin_lock(&cpu_buffer->lock);
642edba5 2042 rb_iter_reset(iter);
3e03fb7f 2043 __raw_spin_unlock(&cpu_buffer->lock);
f83c9d0f 2044 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2045
2046 return iter;
2047}
2048
2049/**
2050 * ring_buffer_finish - finish reading the iterator of the buffer
 2051 * @iter: The iterator retrieved by ring_buffer_read_start
2052 *
2053 * This re-enables the recording to the buffer, and frees the
2054 * iterator.
2055 */
2056void
2057ring_buffer_read_finish(struct ring_buffer_iter *iter)
2058{
2059 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2060
2061 atomic_dec(&cpu_buffer->record_disabled);
2062 kfree(iter);
2063}
2064
2065/**
2066 * ring_buffer_read - read the next item in the ring buffer by the iterator
2067 * @iter: The ring buffer iterator
2068 * @ts: The time stamp of the event read.
2069 *
2070 * This reads the next event in the ring buffer and increments the iterator.
2071 */
2072struct ring_buffer_event *
2073ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2074{
2075 struct ring_buffer_event *event;
2076 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2077 unsigned long flags;
7a8e76a3 2078
2079 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2080 event = rb_iter_peek(iter, ts);
7a8e76a3 2081 if (!event)
f83c9d0f 2082 goto out;
2083
2084 rb_advance_iter(iter);
2085 out:
2086 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2087
2088 return event;
2089}
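/*
 * Example: a minimal sketch of a non-consuming walk over one CPU, pairing
 * ring_buffer_read_start()/ring_buffer_read_finish() around
 * ring_buffer_read().  Recording on that CPU stays disabled for the whole
 * walk.  "my_buffer" and "cpu" are assumed inputs and print_entry() is a
 * hypothetical callback:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(my_buffer, cpu);
 *	if (!iter)
 *		return;
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		print_entry(ring_buffer_event_data(event), ts);
 *
 *	ring_buffer_read_finish(iter);
 */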
2090
2091/**
2092 * ring_buffer_size - return the size of the ring buffer (in bytes)
2093 * @buffer: The ring buffer.
2094 */
2095unsigned long ring_buffer_size(struct ring_buffer *buffer)
2096{
2097 return BUF_PAGE_SIZE * buffer->pages;
2098}
2099
2100static void
2101rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2102{
2103 cpu_buffer->head_page
2104 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
bf41a158 2105 local_set(&cpu_buffer->head_page->write, 0);
abc9b56d 2106 local_set(&cpu_buffer->head_page->page->commit, 0);
d769041f 2107
6f807acd 2108 cpu_buffer->head_page->read = 0;
2109
2110 cpu_buffer->tail_page = cpu_buffer->head_page;
2111 cpu_buffer->commit_page = cpu_buffer->head_page;
2112
2113 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2114 local_set(&cpu_buffer->reader_page->write, 0);
abc9b56d 2115 local_set(&cpu_buffer->reader_page->page->commit, 0);
6f807acd 2116 cpu_buffer->reader_page->read = 0;
7a8e76a3 2117
2118 cpu_buffer->overrun = 0;
2119 cpu_buffer->entries = 0;
2120}
2121
2122/**
2123 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2124 * @buffer: The ring buffer to reset a per cpu buffer of
2125 * @cpu: The CPU buffer to be reset
2126 */
2127void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2128{
2129 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2130 unsigned long flags;
2131
2132 if (!cpu_isset(cpu, buffer->cpumask))
2133 return;
2134
2135 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2136
3e03fb7f 2137 __raw_spin_lock(&cpu_buffer->lock);
2138
2139 rb_reset_cpu(cpu_buffer);
2140
3e03fb7f 2141 __raw_spin_unlock(&cpu_buffer->lock);
2142
2143 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2144}
2145
2146/**
2147 * ring_buffer_reset - reset a ring buffer
2148 * @buffer: The ring buffer to reset all cpu buffers
2149 */
2150void ring_buffer_reset(struct ring_buffer *buffer)
2151{
2152 int cpu;
2153
7a8e76a3 2154 for_each_buffer_cpu(buffer, cpu)
d769041f 2155 ring_buffer_reset_cpu(buffer, cpu);
2156}
2157
2158/**
 2159 * ring_buffer_empty - is the ring buffer empty?
2160 * @buffer: The ring buffer to test
2161 */
2162int ring_buffer_empty(struct ring_buffer *buffer)
2163{
2164 struct ring_buffer_per_cpu *cpu_buffer;
2165 int cpu;
2166
2167 /* yes this is racy, but if you don't like the race, lock the buffer */
2168 for_each_buffer_cpu(buffer, cpu) {
2169 cpu_buffer = buffer->buffers[cpu];
2170 if (!rb_per_cpu_empty(cpu_buffer))
2171 return 0;
2172 }
2173 return 1;
2174}
2175
2176/**
2177 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2178 * @buffer: The ring buffer
2179 * @cpu: The CPU buffer to test
2180 */
2181int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2182{
2183 struct ring_buffer_per_cpu *cpu_buffer;
2184
2185 if (!cpu_isset(cpu, buffer->cpumask))
2186 return 1;
2187
2188 cpu_buffer = buffer->buffers[cpu];
2189 return rb_per_cpu_empty(cpu_buffer);
2190}
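/*
 * Example: a minimal sketch using the (racy) emptiness checks as a cheap
 * "anything to read?" hint before polling each CPU, assuming "my_buffer"
 * is the caller's buffer and drain_cpu() is a hypothetical per-cpu drain:
 *
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		if (ring_buffer_empty_cpu(my_buffer, cpu))
 *			continue;
 *		drain_cpu(my_buffer, cpu);
 *	}
 *
 * Since no lock is taken, an "empty" answer can already be stale by the
 * time the caller acts on it.
 */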
2191
2192/**
2193 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2194 * @buffer_a: One buffer to swap with
2195 * @buffer_b: The other buffer to swap with
2196 *
2197 * This function is useful for tracers that want to take a "snapshot"
 2198 * of a CPU buffer and have another backup buffer lying around.
 2199 * It is expected that the tracer handles the CPU buffer not being
 2200 * used at the moment.
2201 */
2202int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2203 struct ring_buffer *buffer_b, int cpu)
2204{
2205 struct ring_buffer_per_cpu *cpu_buffer_a;
2206 struct ring_buffer_per_cpu *cpu_buffer_b;
2207
2208 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2209 !cpu_isset(cpu, buffer_b->cpumask))
2210 return -EINVAL;
2211
2212 /* At least make sure the two buffers are somewhat the same */
2213 if (buffer_a->size != buffer_b->size ||
2214 buffer_a->pages != buffer_b->pages)
2215 return -EINVAL;
2216
2217 cpu_buffer_a = buffer_a->buffers[cpu];
2218 cpu_buffer_b = buffer_b->buffers[cpu];
2219
2220 /*
2221 * We can't do a synchronize_sched here because this
2222 * function can be called in atomic context.
2223 * Normally this will be called from the same CPU as cpu.
2224 * If not it's up to the caller to protect this.
2225 */
2226 atomic_inc(&cpu_buffer_a->record_disabled);
2227 atomic_inc(&cpu_buffer_b->record_disabled);
2228
2229 buffer_a->buffers[cpu] = cpu_buffer_b;
2230 buffer_b->buffers[cpu] = cpu_buffer_a;
2231
2232 cpu_buffer_b->buffer = buffer_a;
2233 cpu_buffer_a->buffer = buffer_b;
2234
2235 atomic_dec(&cpu_buffer_a->record_disabled);
2236 atomic_dec(&cpu_buffer_b->record_disabled);
2237
2238 return 0;
2239}
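/*
 * Example: a minimal sketch of the "snapshot" pattern described above,
 * assuming "live_buffer" and "snapshot_buffer" are two buffers created
 * with the same size and page count, and that the caller has already
 * dealt with concurrent writers on this CPU:
 *
 *	int cpu = raw_smp_processor_id();
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snapshot_buffer, cpu) == 0) {
 *		... tracing continues into live_buffer while the frozen
 *		    per-cpu data is read out of snapshot_buffer with the
 *		    consuming or iterator interfaces above ...
 *	}
 */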
2240
2241static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2242 struct buffer_data_page *page)
2243{
2244 struct ring_buffer_event *event;
2245 unsigned long head;
2246
2247 __raw_spin_lock(&cpu_buffer->lock);
2248 for (head = 0; head < local_read(&page->commit);
2249 head += rb_event_length(event)) {
2250
2251 event = __rb_data_page_index(page, head);
2252 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
 2253 break; /* stop scanning but still release cpu_buffer->lock below */
2254 /* Only count data entries */
2255 if (event->type != RINGBUF_TYPE_DATA)
2256 continue;
2257 cpu_buffer->entries--;
2258 }
2259 __raw_spin_unlock(&cpu_buffer->lock);
2260}
2261
2262/**
2263 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2264 * @buffer: the buffer to allocate for.
2265 *
2266 * This function is used in conjunction with ring_buffer_read_page.
2267 * When reading a full page from the ring buffer, these functions
2268 * can be used to speed up the process. The calling function should
2269 * allocate a few pages first with this function. Then when it
2270 * needs to get pages from the ring buffer, it passes the result
2271 * of this function into ring_buffer_read_page, which will swap
2272 * the page that was allocated, with the read page of the buffer.
2273 *
2274 * Returns:
2275 * The page allocated, or NULL on error.
2276 */
2277void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2278{
2279 unsigned long addr;
2280 struct buffer_data_page *page;
2281
2282 addr = __get_free_page(GFP_KERNEL);
2283 if (!addr)
2284 return NULL;
2285
2286 page = (void *)addr;
2287
2288 return page;
2289}
2290
2291/**
2292 * ring_buffer_free_read_page - free an allocated read page
 2293 * @buffer: the buffer the page was allocated for
2294 * @data: the page to free
2295 *
2296 * Free a page allocated from ring_buffer_alloc_read_page.
2297 */
2298void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2299{
2300 free_page((unsigned long)data);
2301}
2302
2303/**
2304 * ring_buffer_read_page - extract a page from the ring buffer
2305 * @buffer: buffer to extract from
2306 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2307 * @cpu: the cpu of the buffer to extract
2308 * @full: should the extraction only happen when the page is full.
2309 *
2310 * This function will pull out a page from the ring buffer and consume it.
 2311 * @data_page must be the address of the variable holding the page that was
 2312 * returned from ring_buffer_alloc_read_page. This is because the page might
 2313 * be swapped with a page in the ring buffer.
2314 *
2315 * for example:
 2316 * rpage = ring_buffer_alloc_read_page(buffer);
2317 * if (!rpage)
2318 * return error;
2319 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2320 * if (ret)
2321 * process_page(rpage);
2322 *
2323 * When @full is set, the function will not return true unless
2324 * the writer is off the reader page.
2325 *
2326 * Note: it is up to the calling functions to handle sleeps and wakeups.
2327 * The ring buffer can be used anywhere in the kernel and can not
2328 * blindly call wake_up. The layer that uses the ring buffer must be
2329 * responsible for that.
2330 *
2331 * Returns:
2332 * 1 if data has been transferred
2333 * 0 if no data has been transferred.
2334 */
2335int ring_buffer_read_page(struct ring_buffer *buffer,
2336 void **data_page, int cpu, int full)
2337{
2338 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2339 struct ring_buffer_event *event;
2340 struct buffer_data_page *page;
2341 unsigned long flags;
2342 int ret = 0;
2343
2344 if (!data_page)
2345 return 0;
2346
2347 page = *data_page;
2348 if (!page)
2349 return 0;
2350
2351 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2352
2353 /*
2354 * rb_buffer_peek will get the next ring buffer if
2355 * the current reader page is empty.
2356 */
2357 event = rb_buffer_peek(buffer, cpu, NULL);
2358 if (!event)
2359 goto out;
2360
2361 /* check for data */
2362 if (!local_read(&cpu_buffer->reader_page->page->commit))
2363 goto out;
2364 /*
2365 * If the writer is already off of the read page, then simply
2366 * switch the read page with the given page. Otherwise
2367 * we need to copy the data from the reader to the writer.
2368 */
2369 if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
2370 unsigned int read = cpu_buffer->reader_page->read;
2371
2372 if (full)
2373 goto out;
2374 /* The writer is still on the reader page, we must copy */
2375 page = cpu_buffer->reader_page->page;
2376 memcpy(page->data,
2377 cpu_buffer->reader_page->page->data + read,
2378 local_read(&page->commit) - read);
2379
 2380 /* consume what was read: advance the reader to the commit point */
 2381 cpu_buffer->reader_page->read = local_read(&page->commit);
2382
2383 } else {
2384 /* swap the pages */
2385 rb_init_page(page);
2386 page = cpu_buffer->reader_page->page;
2387 cpu_buffer->reader_page->page = *data_page;
2388 cpu_buffer->reader_page->read = 0;
2389 *data_page = page;
2390 }
2391 ret = 1;
2392
2393 /* update the entry counter */
2394 rb_remove_entries(cpu_buffer, page);
2395 out:
2396 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2397
2398 return ret;
2399}
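/*
 * Example: a minimal sketch of the alloc/read/free cycle for page-sized
 * reads, assuming "my_buffer" and "cpu" come from the caller and
 * splice_page_out() is a hypothetical sink for a filled page:
 *
 *	void *rpage;
 *
 *	rpage = ring_buffer_alloc_read_page(my_buffer);
 *	if (!rpage)
 *		return -ENOMEM;
 *
 *	while (ring_buffer_read_page(my_buffer, &rpage, cpu, 0))
 *		splice_page_out(rpage);
 *
 *	ring_buffer_free_read_page(my_buffer, rpage);
 *
 * Because ring_buffer_read_page() may swap the passed-in page with the
 * reader page, &rpage is passed (not rpage) and the page handed to
 * splice_page_out() need not be the one originally allocated.
 */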
2400
2401static ssize_t
2402rb_simple_read(struct file *filp, char __user *ubuf,
2403 size_t cnt, loff_t *ppos)
2404{
033601a3 2405 long *p = filp->private_data;
2406 char buf[64];
2407 int r;
2408
2409 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2410 r = sprintf(buf, "permanently disabled\n");
2411 else
2412 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2413
2414 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2415}
2416
2417static ssize_t
2418rb_simple_write(struct file *filp, const char __user *ubuf,
2419 size_t cnt, loff_t *ppos)
2420{
033601a3 2421 long *p = filp->private_data;
2422 char buf[64];
2423 long val;
2424 int ret;
2425
2426 if (cnt >= sizeof(buf))
2427 return -EINVAL;
2428
2429 if (copy_from_user(&buf, ubuf, cnt))
2430 return -EFAULT;
2431
2432 buf[cnt] = 0;
2433
2434 ret = strict_strtoul(buf, 10, &val);
2435 if (ret < 0)
2436 return ret;
2437
2438 if (val)
2439 set_bit(RB_BUFFERS_ON_BIT, p);
2440 else
2441 clear_bit(RB_BUFFERS_ON_BIT, p);
2442
2443 (*ppos)++;
2444
2445 return cnt;
2446}
2447
2448static struct file_operations rb_simple_fops = {
2449 .open = tracing_open_generic,
2450 .read = rb_simple_read,
2451 .write = rb_simple_write,
2452};
2453
2454
2455static __init int rb_init_debugfs(void)
2456{
2457 struct dentry *d_tracer;
2458 struct dentry *entry;
2459
2460 d_tracer = tracing_init_dentry();
2461
2462 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
033601a3 2463 &ring_buffer_flags, &rb_simple_fops);
2464 if (!entry)
2465 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2466
2467 return 0;
2468}
2469
2470fs_initcall(rb_init_debugfs);
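/*
 * With debugfs mounted (commonly /sys/kernel/debug, though the mount
 * point is up to the administrator), the file created above appears as
 * <debugfs>/tracing/tracing_on.  Writing 0 clears RB_BUFFERS_ON_BIT and
 * stops all recording, writing a non-zero value sets it again, and once
 * tracing_off_permanent() has run, reads report "permanently disabled"
 * and recording stays off no matter what is written.  For example:
 *
 *	# cat /sys/kernel/debug/tracing/tracing_on
 *	1
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on
 */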