ring-buffer: Wrap a list.next reference with rb_list_head()
kernel/trace/ring_buffer.c (from mt8127/android_kernel_alcatel_ttab.git)
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

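/*
 * Illustrative sketch (not part of the original file): the reader-page
 * swap above in miniature, using plain prev/next pointers instead of
 * the flagged list_head links the real code uses. All names here are
 * hypothetical.
 */
struct rb_sketch_page {
	struct rb_sketch_page *next;
	struct rb_sketch_page *prev;
};

/* Splice @reader in where @head was; the old head becomes the new reader. */
static struct rb_sketch_page *
rb_sketch_swap_reader(struct rb_sketch_page *reader, struct rb_sketch_page *head)
{
	reader->next = head->next;	/* reader takes head's place ...   */
	reader->prev = head->prev;
	head->next->prev = reader;	/* ... and the ring now skips head */
	head->prev->next = reader;
	head->next = head->prev = NULL;	/* old head is now off the ring    */
	return head;
}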
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

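/*
 * Illustrative usage sketch (not in the original file): freezing all
 * recording around a suspect region, using only the helpers above.
 */
static void rb_sketch_freeze_region(void)
{
	tracing_off();			/* stop recording to every ring buffer */
	/* ... inspect or dump buffer contents here ... */
	WARN_ON(tracing_is_on());	/* still off unless someone re-enabled */
	tracing_on();			/* resume recording */
}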
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

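/*
 * Illustrative sketch (not in the original file): how a consumer that
 * holds an event uses the two accessors above. The record type is
 * hypothetical.
 */
static void rb_sketch_dump_event(struct ring_buffer_event *event)
{
	unsigned len  = ring_buffer_event_length(event); /* payload bytes */
	void *payload = ring_buffer_event_data(event);	  /* payload start */

	/* A tracer would now cast payload to its own record type, e.g.: */
	/* struct my_entry *entry = payload; */
	(void)len;
	(void)payload;
}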
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12 bits.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

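/*
 * Illustrative sketch (not in the original file): how the 20/12 bit
 * split packs a write index and a nested-updater count into one word.
 */
static void rb_sketch_split_counter(void)
{
	unsigned long val = (3UL * RB_WRITE_INTCNT) | 0x1234;

	unsigned long index    = val & RB_WRITE_MASK;	/* 0x1234: write index   */
	unsigned long updaters = val >> 20;		/* 3: updaters in flight */

	(void)index;
	(void)updaters;
}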
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)

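/*
 * Worked numbers (illustrative, assuming the common PAGE_SIZE of 4096
 * and the 16-byte buffer_data_page header of a 64-bit build):
 *
 *   BUF_PAGE_SIZE          = 4096 - 16 = 4080 bytes of event space
 *   BUF_MAX_DATA_SIZE      = 4080 - 8  = 4072 bytes of payload
 *   RB_TIMESTAMPS_PER_PAGE = 4080 / 16 = 255 time stamps
 *
 * A time_delta must also fit in 27 bits, i.e. roughly 134 ms at one
 * nanosecond resolution, before a TIME_EXTEND event becomes necessary.
 */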
int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

/*
 * If head_page == tail_page and head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	local_t				commit_overrun;
	local_t				overrun;
	local_t				entries;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads can happen
 * on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next	bit 1	  bit 0
 *				-------	 -------
 * Normal page			   0	    0
 * Points to head page		   0	    1
 * New head page		   1	    0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

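/*
 * Illustrative sketch (not in the original file): tagging and untagging
 * a next pointer with the HEAD flag. Cache-line alignment guarantees
 * the low two bits of a buffer_page address are free to carry flags.
 */
static void rb_sketch_tag_pointer(struct buffer_page *next)
{
	unsigned long val = (unsigned long)&next->list | RB_PAGE_HEAD;

	/* Traversals must strip the flag before dereferencing: */
	struct list_head *clean = rb_list_head((struct list_head *)val);
	unsigned long flag = val & RB_FLAG_MASK;	/* RB_PAGE_HEAD here */

	(void)clean;
	(void)flag;
}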
/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

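/*
 * Illustrative sketch (not in the original file): claiming the head
 * page with the helpers above. Only one of several racing writers wins
 * the underlying cmpxchg; a loser sees RB_PAGE_MOVED when a reader has
 * swapped the page out from under it.
 */
static void rb_sketch_claim_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev)
{
	int type;

	/* HEAD -> UPDATE: claim the head page before moving it */
	type = rb_head_page_set_update(cpu_buffer, head, prev, RB_PAGE_HEAD);
	if (type == RB_PAGE_MOVED)
		return;		/* a reader took the page; caller must retry */
}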
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	WARN_ON(!nr_pages);

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages < 2)
		buffer->pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

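/*
 * Illustrative usage sketch (not in the original file): creating a
 * 1 MB per-cpu overwriting buffer. ring_buffer_alloc() is the header's
 * wrapper macro that supplies the lock_class_key to the function above.
 */
static struct ring_buffer *rb_sketch_create(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return NULL;		/* allocation failed */

	return buffer;			/* caller frees with ring_buffer_free() */
}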
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

	spin_unlock_irq(&cpu_buffer->reader_lock);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

	spin_unlock_irq(&cpu_buffer->reader_lock);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	atomic_inc(&buffer->record_disabled);

	/* Make sure all writers are done with this buffer. */
	synchronize_sched();

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}

static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->buffer->pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type_len = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
	case RINGBUF_TYPE_TIME_EXTEND:
	case RINGBUF_TYPE_TIME_STAMP:
		break;

	case 0:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA)
			event->array[0] = length;
		else
			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}

/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * Otherwise we are an interrupt, and only
	 * want the outer most commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *           1) We really set it.
	 *           2) A bunch of interrupts came in and moved
	 *              the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outer most commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}

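/*
 * Worked example (illustrative): reserving a 6-byte payload. 6 is not
 * greater than RB_MAX_SMALL_DATA (112), so no extra length word is
 * added: 6 + RB_EVNT_HDR_SIZE (4) = 10, and ALIGN(10, 4) = 12 bytes
 * are reserved. rb_update_event() then stores type_len = 8 / 4 = 2,
 * meaning 8 bytes of payload area follow the 4-byte header.
 */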
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}

7a8e76a3 1783static struct ring_buffer_event *
6634ff26
SR
1784rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1785 unsigned long length, unsigned long tail,
6634ff26 1786 struct buffer_page *tail_page, u64 *ts)
7a8e76a3 1787{
5a50e33c 1788 struct buffer_page *commit_page = cpu_buffer->commit_page;
7a8e76a3 1789 struct ring_buffer *buffer = cpu_buffer->buffer;
77ae365e
SR
1790 struct buffer_page *next_page;
1791 int ret;
aa20ae84
SR
1792
1793 next_page = tail_page;
1794
aa20ae84
SR
1795 rb_inc_page(cpu_buffer, &next_page);
1796
aa20ae84
SR
1797 /*
1798 * If for some reason, we had an interrupt storm that made
1799 * it all the way around the buffer, bail, and warn
1800 * about it.
1801 */
1802 if (unlikely(next_page == commit_page)) {
77ae365e 1803 local_inc(&cpu_buffer->commit_overrun);
aa20ae84
SR
1804 goto out_reset;
1805 }
1806
77ae365e
SR
1807 /*
1808 * This is where the fun begins!
1809 *
1810 * We are fighting against races between a reader that
1811 * could be on another CPU trying to swap its reader
1812 * page with the buffer head.
1813 *
1814 * We are also fighting against interrupts coming in and
1815 * moving the head or tail on us as well.
1816 *
1817 * If the next page is the head page then we have filled
1818 * the buffer, unless the commit page is still on the
1819 * reader page.
1820 */
1821 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
aa20ae84 1822
77ae365e
SR
1823 /*
1824 * If the commit is not on the reader page, then
1825 * move the header page.
1826 */
1827 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1828 /*
1829 * If we are not in overwrite mode,
1830 * this is easy, just stop here.
1831 */
1832 if (!(buffer->flags & RB_FL_OVERWRITE))
1833 goto out_reset;
1834
1835 ret = rb_handle_head_page(cpu_buffer,
1836 tail_page,
1837 next_page);
1838 if (ret < 0)
1839 goto out_reset;
1840 if (ret)
1841 goto out_again;
1842 } else {
1843 /*
1844 * We need to be careful here too. The
1845 * commit page could still be on the reader
1846 * page. We could have a small buffer, and
1847 * have filled up the buffer with events
1848 * from interrupts and such, and wrapped.
1849 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		*ts = rb_time_stamp(buffer);
		next_page->page->time_stamp = *ts;
	}

 out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE)
		return rb_move_tail(cpu_buffer, length, tail,
				    tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(event, type, length);

	/* The passed in type is zero for DATA */
	if (likely(!type))
		local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = *ts;

	return event;
}

static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			return 1;
	}

	/* could not discard */
	return 0;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big; we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then it was
		 * updated with the page itself. Try to discard it
		 * and if we can't just make it zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			/* try to discard, since we do not need this */
			if (!rb_try_to_discard(cpu_buffer, event)) {
				/* nope, just zero it */
				event->time_delta = 0;
				event->array[0] = 0;
			}
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Try to discard the event */
		if (!rb_try_to_discard(cpu_buffer, event)) {
			/* Darn, this is just wasted space */
			event->time_delta = 0;
			event->array[0] = 0;
		}
		ret = 0;
	}

	*delta = 0;

	return ret;
}
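
/*
 * Illustrative sketch (not from the original source): how a time extend
 * event splits an oversized delta. A normal event stores only 27 bits of
 * time_delta, so with TS_SHIFT == 27 a delta such as 0x12345678 is
 * encoded in the time extend event as:
 *
 *	event->time_delta = 0x12345678 & TS_MASK;	// low 27 bits
 *	event->array[0]   = 0x12345678 >> TS_SHIFT;	// upper bits
 *
 * A reader reverses this in rb_update_read_stamp():
 *
 *	delta = event->array[0];
 *	delta <<= TS_SHIFT;
 *	delta += event->time_delta;
 */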

static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta = 0;
	int commit = 0;
	int nr_loops = 0;

	rb_start_commit(cpu_buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	length = rb_calculate_event_length(length);
 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
		   rb_page_write(cpu_buffer->tail_page) ==
		   rb_commit_index(cpu_buffer))) {
		u64 diff;

		diff = ts - cpu_buffer->write_stamp;

		/* make sure this diff is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			goto get_event;

		delta = diff;
		if (unlikely(test_time_stamp(delta))) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
			if (commit == -EBUSY)
				goto out_fail;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	}

 get_event:
	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	if (!rb_event_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;

 out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}

#ifdef CONFIG_TRACING

#define TRACE_RECURSIVE_DEPTH 16

static int trace_recursive_lock(void)
{
	current->trace_recursion++;

	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
		return 0;

	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    current->trace_recursion,
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
	return -1;
}

static void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!current->trace_recursion);

	current->trace_recursion--;
}

#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
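
/*
 * Illustrative usage sketch (not from the original source): a writer
 * pairs the reserve with a commit; "struct my_entry" is a hypothetical
 * payload type.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * Preemption stays disabled between the two calls, which is why the
 * commit must run on the same CPU as the reserve.
 */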

static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	/*
	 * The event first in the commit queue updates the
	 * time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event))
		cpu_buffer->write_stamp += event->time_delta;
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	addr &= PAGE_MASK;

	/* Do the likely case first */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/*
	 * Because the commit page may be on the reader page we
	 * start with the next page and check the end loop there.
	 */
	rb_inc_page(cpu_buffer, &bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(cpu_buffer, &bpage);
	} while (bpage != start);

	/* commit not part of this buffer?? */
	RB_WARN_ON(cpu_buffer, 1);
}

/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	rb_decrement_entry(cpu_buffer, event);
	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must still update the timestamp.
	 */
	rb_update_write_stamp(cpu_buffer, event);
 out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
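
/*
 * Illustrative usage sketch (not from the original source): discarding
 * replaces the commit when a filter rejects the entry after it was
 * reserved; "my_filter_match" and "fill_entry" are hypothetical helpers.
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	fill_entry(entry);
 *	if (!my_filter_match(entry))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */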

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
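
/*
 * Illustrative usage sketch (not from the original source): when the
 * payload already exists, a single call replaces the reserve/commit
 * pair; "struct my_entry" is a hypothetical payload type.
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		return -EBUSY;	// recording disabled or buffer full
 */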

static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return 1;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
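
/*
 * Illustrative usage sketch (not from the original source): quiescing
 * the buffer around a global operation. The synchronize_sched() makes
 * sure writers that saw the old state have finished.
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	// ... inspect or reset the buffer ...
 *	ring_buffer_record_enable(buffer);
 */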

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
		- cpu_buffer->read;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += (local_read(&cpu_buffer->entries) -
			    local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += local_read(&cpu_buffer->overrun);
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
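
/*
 * Illustrative sketch (not from the original source): how the counters
 * relate. Entries still readable on a CPU are what was written minus
 * what was lost to overwrite and what was already consumed:
 *
 *	readable = entries - overrun - read;
 *
 * which is exactly what ring_buffer_entries_cpu() computes above.
 */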

static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = rb_set_head_page(cpu_buffer);
		if (unlikely(!iter->head_page))
			return;
		iter->head = iter->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;
	int ret;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Reset the reader page to size zero.
	 */
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);

 spin:
	/*
	 * Splice the empty reader page into the list around the head.
	 */
	reader = rb_set_head_page(cpu_buffer);
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	/*
	 * cpu_buffer->pages just needs to point to the buffer, it
	 * has no specific buffer page to point to. Let's move it out
	 * of our way so we don't accidentally swap it.
	 */
	cpu_buffer->pages = reader->list.prev;

	/* The reader page will be pointing to the new head */
	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

	/*
	 * Here's the tricky part.
	 *
	 * We need to move the pointer past the header page.
	 * But we can only do that if a writer is not currently
	 * moving it. The page before the header page has the
	 * flag bit '1' set if it is pointing to the page we want,
	 * but if the writer is in the process of moving it
	 * then it will be '2', or '0' if it has already moved.
	 */

	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);

	/*
	 * If we did not convert it, then we must try again.
	 */
	if (!ret)
		goto spin;

	/*
	 * Yeah! We succeeded in replacing the page.
	 *
	 * Now make the new head point back to the reader page.
	 */
	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written, or from discarded
	 * commits. The most that we can have is the number on a single page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered.
	 * We can get multiple timestamps by nested interrupts or also
	 * if filtering is on (discarding commits). Since discarding
	 * commits can be frequent we can get a lot of timestamps.
	 * But we limit them by not adding timestamps if they begin
	 * at the start of a page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer too. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(cpu_buffer, ts);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(cpu_buffer, ts);
	if (event)
		rb_advance_reader(cpu_buffer);

	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
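
/*
 * Illustrative usage sketch (not from the original source): draining a
 * CPU's events with the consuming reader; "process" is a hypothetical
 * callback.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
 *		void *data = ring_buffer_event_data(event);
 *		process(data, ts);
 *	}
 */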

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
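
/*
 * Illustrative usage sketch (not from the original source): a full
 * non-consuming pass over one CPU buffer; "inspect" is a hypothetical
 * callback.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		inspect(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 *
 * Recording on this CPU stays disabled for the whole walk, so the
 * events cannot be overwritten underneath the iterator.
 */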

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
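
/*
 * Illustrative usage sketch (not from the original source): snapshotting
 * one CPU by swapping its live buffer with a spare of the same size;
 * "snapshot_buffer" is a hypothetical pre-allocated ring buffer.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snapshot_buffer, cpu) == 0) {
 *		// snapshot_buffer now holds the old live data for @cpu;
 *		// read it at leisure while tracing continues on the
 *		// fresh pages now installed in live_buffer.
 *	}
 */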
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
3606 * @data: the page to free
3607 *
3608 * Free a page allocated from ring_buffer_alloc_read_page.
3609 */
3610void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3611{
3612 free_page((unsigned long)data);
3613}
d6ce96da 3614EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
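
/*
 * Lifecycle sketch for the two helpers above together with
 * ring_buffer_read_page() (illustrative only; process_data() is a
 * hypothetical consumer and error handling is trimmed):
 *
 *	void *page = ring_buffer_alloc_read_page(buffer);
 *	int ret;
 *
 *	if (!page)
 *		return;
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_data(page, ret);
 *	ring_buffer_free_read_page(buffer, page);
 *
 * Note that &page is passed by reference: on the swap path,
 * ring_buffer_read_page() hands back a different page than the one
 * passed in, so the same variable must later be handed to
 * ring_buffer_free_read_page().
 */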

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will only succeed if the writer is
 * off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
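
/*
 * Drain-loop sketch (illustrative only; ship_page_to_user() is a
 * hypothetical consumer): with @full set, a page the writer is still
 * on is not partially copied, so a negative return here just means
 * "nothing complete to read yet" rather than a hard error.
 *
 *	void *page = ring_buffer_alloc_read_page(buffer);
 *	int ret;
 *
 *	if (!page)
 *		return;
 *	while ((ret = ring_buffer_read_page(buffer, &page,
 *					    PAGE_SIZE, cpu, 1)) >= 0)
 *		ship_page_to_user(page, ret);
 *	ring_buffer_free_read_page(buffer, page);
 */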

#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};


static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
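
/*
 * The file created above is exercised from userspace; assuming debugfs
 * is mounted at /sys/kernel/debug, something like:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on
 *
 * turns all ring buffers off and back on. Reading the file yields "0",
 * "1", or "permanently disabled" once RB_BUFFERS_DISABLED_BIT is set.
 */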
#endif /* CONFIG_TRACING */

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
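
/*
 * Registration sketch (illustrative; the actual hookup happens where
 * the struct ring_buffer is allocated, earlier in this file and not
 * shown here):
 *
 *	buffer->cpu_notify.notifier_call = rb_cpu_notify;
 *	buffer->cpu_notify.priority = 0;
 *	register_cpu_notifier(&buffer->cpu_notify);
 *
 * On CPU_UP_PREPARE the callback allocates a per-cpu buffer for the
 * incoming CPU before any writer can run on it; on CPU_DOWN_PREPARE it
 * deliberately keeps the buffer around so pending trace data survives
 * the CPU going offline.
 */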