tracing: Remove cpu arg from the rb_time_stamp() function
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / trace / ring_buffer.c
CommitLineData
7a8e76a3
SR
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
14131f2f 7#include <linux/trace_clock.h>
78d904b4 8#include <linux/ftrace_irq.h>
7a8e76a3
SR
9#include <linux/spinlock.h>
10#include <linux/debugfs.h>
11#include <linux/uaccess.h>
a81bd80a 12#include <linux/hardirq.h>
1744a21d 13#include <linux/kmemcheck.h>
7a8e76a3
SR
14#include <linux/module.h>
15#include <linux/percpu.h>
16#include <linux/mutex.h>
7a8e76a3
SR
17#include <linux/init.h>
18#include <linux/hash.h>
19#include <linux/list.h>
554f786e 20#include <linux/cpu.h>
7a8e76a3
SR
21#include <linux/fs.h>
22
182e9f5f
SR
23#include "trace.h"
24
d1b182a8
SR
25/*
26 * The ring buffer header is special. We must manually up keep it.
27 */
28int ring_buffer_print_entry_header(struct trace_seq *s)
29{
30 int ret;
31
334d4169
LJ
32 ret = trace_seq_printf(s, "# compressed entry header\n");
33 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
d1b182a8
SR
34 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
35 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
36 ret = trace_seq_printf(s, "\n");
37 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
38 RINGBUF_TYPE_PADDING);
39 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
40 RINGBUF_TYPE_TIME_EXTEND);
334d4169
LJ
41 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
42 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
d1b182a8
SR
43
44 return ret;
45}
46
5cc98548
SR
47/*
48 * The ring buffer is made up of a list of pages. A separate list of pages is
49 * allocated for each CPU. A writer may only write to a buffer that is
50 * associated with the CPU it is currently executing on. A reader may read
51 * from any per cpu buffer.
52 *
53 * The reader is special. For each per cpu buffer, the reader has its own
54 * reader page. When a reader has read the entire reader page, this reader
55 * page is swapped with another page in the ring buffer.
56 *
57 * Now, as long as the writer is off the reader page, the reader can do what
58 * ever it wants with that page. The writer will never write to that page
59 * again (as long as it is out of the ring buffer).
60 *
61 * Here's some silly ASCII art.
62 *
63 * +------+
64 * |reader| RING BUFFER
65 * |page |
66 * +------+ +---+ +---+ +---+
67 * | |-->| |-->| |
68 * +---+ +---+ +---+
69 * ^ |
70 * | |
71 * +---------------+
72 *
73 *
74 * +------+
75 * |reader| RING BUFFER
76 * |page |------------------v
77 * +------+ +---+ +---+ +---+
78 * | |-->| |-->| |
79 * +---+ +---+ +---+
80 * ^ |
81 * | |
82 * +---------------+
83 *
84 *
85 * +------+
86 * |reader| RING BUFFER
87 * |page |------------------v
88 * +------+ +---+ +---+ +---+
89 * ^ | |-->| |-->| |
90 * | +---+ +---+ +---+
91 * | |
92 * | |
93 * +------------------------------+
94 *
95 *
96 * +------+
97 * |buffer| RING BUFFER
98 * |page |------------------v
99 * +------+ +---+ +---+ +---+
100 * ^ | | | |-->| |
101 * | New +---+ +---+ +---+
102 * | Reader------^ |
103 * | page |
104 * +------------------------------+
105 *
106 *
107 * After we make this swap, the reader can hand this page off to the splice
108 * code and be done with it. It can even allocate a new page if it needs to
109 * and swap that into the ring buffer.
110 *
111 * We will be using cmpxchg soon to make all this lockless.
112 *
113 */
114
033601a3
SR
115/*
116 * A fast way to enable or disable all ring buffers is to
117 * call tracing_on or tracing_off. Turning off the ring buffers
118 * prevents all ring buffers from being recorded to.
119 * Turning this switch on, makes it OK to write to the
120 * ring buffer, if the ring buffer is enabled itself.
121 *
122 * There's three layers that must be on in order to write
123 * to the ring buffer.
124 *
125 * 1) This global flag must be set.
126 * 2) The ring buffer must be enabled for recording.
127 * 3) The per cpu buffer must be enabled for recording.
128 *
129 * In case of an anomaly, this global flag has a bit set that
130 * will permantly disable all ring buffers.
131 */
132
133/*
134 * Global flag to disable all recording to ring buffers
135 * This has two bits: ON, DISABLED
136 *
137 * ON DISABLED
138 * ---- ----------
139 * 0 0 : ring buffers are off
140 * 1 0 : ring buffers are on
141 * X 1 : ring buffers are permanently disabled
142 */
143
144enum {
145 RB_BUFFERS_ON_BIT = 0,
146 RB_BUFFERS_DISABLED_BIT = 1,
147};
148
149enum {
150 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
151 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
152};
153
5e39841c 154static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
a3583244 155
474d32b6
SR
156#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
157
a3583244
SR
158/**
159 * tracing_on - enable all tracing buffers
160 *
161 * This function enables all tracing buffers that may have been
162 * disabled with tracing_off.
163 */
164void tracing_on(void)
165{
033601a3 166 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
a3583244 167}
c4f50183 168EXPORT_SYMBOL_GPL(tracing_on);
a3583244
SR
169
170/**
171 * tracing_off - turn off all tracing buffers
172 *
173 * This function stops all tracing buffers from recording data.
174 * It does not disable any overhead the tracers themselves may
175 * be causing. This function simply causes all recording to
176 * the ring buffers to fail.
177 */
178void tracing_off(void)
179{
033601a3
SR
180 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181}
c4f50183 182EXPORT_SYMBOL_GPL(tracing_off);
033601a3
SR
183
184/**
185 * tracing_off_permanent - permanently disable ring buffers
186 *
187 * This function, once called, will disable all ring buffers
c3706f00 188 * permanently.
033601a3
SR
189 */
190void tracing_off_permanent(void)
191{
192 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
a3583244
SR
193}
194
988ae9d6
SR
195/**
196 * tracing_is_on - show state of ring buffers enabled
197 */
198int tracing_is_on(void)
199{
200 return ring_buffer_flags == RB_BUFFERS_ON;
201}
202EXPORT_SYMBOL_GPL(tracing_is_on);
203
e3d6bf0a 204#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
67d34724 205#define RB_ALIGNMENT 4U
334d4169 206#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
c7b09308 207#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
334d4169
LJ
208
209/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
7a8e76a3
SR
211
212enum {
213 RB_LEN_TIME_EXTEND = 8,
214 RB_LEN_TIME_STAMP = 16,
215};
216
2d622719
TZ
217static inline int rb_null_event(struct ring_buffer_event *event)
218{
a1863c21 219 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
2d622719
TZ
220}
221
222static void rb_event_set_padding(struct ring_buffer_event *event)
223{
a1863c21 224 /* padding has a NULL time_delta */
334d4169 225 event->type_len = RINGBUF_TYPE_PADDING;
2d622719
TZ
226 event->time_delta = 0;
227}
228
34a148bf 229static unsigned
2d622719 230rb_event_data_length(struct ring_buffer_event *event)
7a8e76a3
SR
231{
232 unsigned length;
233
334d4169
LJ
234 if (event->type_len)
235 length = event->type_len * RB_ALIGNMENT;
2d622719
TZ
236 else
237 length = event->array[0];
238 return length + RB_EVNT_HDR_SIZE;
239}
240
241/* inline for ring buffer fast paths */
242static unsigned
243rb_event_length(struct ring_buffer_event *event)
244{
334d4169 245 switch (event->type_len) {
7a8e76a3 246 case RINGBUF_TYPE_PADDING:
2d622719
TZ
247 if (rb_null_event(event))
248 /* undefined */
249 return -1;
334d4169 250 return event->array[0] + RB_EVNT_HDR_SIZE;
7a8e76a3
SR
251
252 case RINGBUF_TYPE_TIME_EXTEND:
253 return RB_LEN_TIME_EXTEND;
254
255 case RINGBUF_TYPE_TIME_STAMP:
256 return RB_LEN_TIME_STAMP;
257
258 case RINGBUF_TYPE_DATA:
2d622719 259 return rb_event_data_length(event);
7a8e76a3
SR
260 default:
261 BUG();
262 }
263 /* not hit */
264 return 0;
265}
266
267/**
268 * ring_buffer_event_length - return the length of the event
269 * @event: the event to get the length of
270 */
271unsigned ring_buffer_event_length(struct ring_buffer_event *event)
272{
465634ad 273 unsigned length = rb_event_length(event);
334d4169 274 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
465634ad
RR
275 return length;
276 length -= RB_EVNT_HDR_SIZE;
277 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
278 length -= sizeof(event->array[0]);
279 return length;
7a8e76a3 280}
c4f50183 281EXPORT_SYMBOL_GPL(ring_buffer_event_length);
7a8e76a3
SR
282
283/* inline for ring buffer fast paths */
34a148bf 284static void *
7a8e76a3
SR
285rb_event_data(struct ring_buffer_event *event)
286{
334d4169 287 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
7a8e76a3 288 /* If length is in len field, then array[0] has the data */
334d4169 289 if (event->type_len)
7a8e76a3
SR
290 return (void *)&event->array[0];
291 /* Otherwise length is in array[0] and array[1] has the data */
292 return (void *)&event->array[1];
293}
294
295/**
296 * ring_buffer_event_data - return the data of the event
297 * @event: the event to get the data from
298 */
299void *ring_buffer_event_data(struct ring_buffer_event *event)
300{
301 return rb_event_data(event);
302}
c4f50183 303EXPORT_SYMBOL_GPL(ring_buffer_event_data);
7a8e76a3
SR
304
305#define for_each_buffer_cpu(buffer, cpu) \
9e01c1b7 306 for_each_cpu(cpu, buffer->cpumask)
7a8e76a3
SR
307
308#define TS_SHIFT 27
309#define TS_MASK ((1ULL << TS_SHIFT) - 1)
310#define TS_DELTA_TEST (~TS_MASK)
311
abc9b56d 312struct buffer_data_page {
e4c2ce82 313 u64 time_stamp; /* page time stamp */
c3706f00 314 local_t commit; /* write committed index */
abc9b56d
SR
315 unsigned char data[]; /* data of buffer page */
316};
317
77ae365e
SR
318/*
319 * Note, the buffer_page list must be first. The buffer pages
320 * are allocated in cache lines, which means that each buffer
321 * page will be at the beginning of a cache line, and thus
322 * the least significant bits will be zero. We use this to
323 * add flags in the list struct pointers, to make the ring buffer
324 * lockless.
325 */
abc9b56d 326struct buffer_page {
778c55d4 327 struct list_head list; /* list of buffer pages */
abc9b56d 328 local_t write; /* index for next write */
6f807acd 329 unsigned read; /* index for next read */
778c55d4 330 local_t entries; /* entries on this page */
abc9b56d 331 struct buffer_data_page *page; /* Actual data page */
7a8e76a3
SR
332};
333
77ae365e
SR
334/*
335 * The buffer page counters, write and entries, must be reset
336 * atomically when crossing page boundaries. To synchronize this
337 * update, two counters are inserted into the number. One is
338 * the actual counter for the write position or count on the page.
339 *
340 * The other is a counter of updaters. Before an update happens
341 * the update partition of the counter is incremented. This will
342 * allow the updater to update the counter atomically.
343 *
344 * The counter is 20 bits, and the state data is 12.
345 */
346#define RB_WRITE_MASK 0xfffff
347#define RB_WRITE_INTCNT (1 << 20)
348
044fa782 349static void rb_init_page(struct buffer_data_page *bpage)
abc9b56d 350{
044fa782 351 local_set(&bpage->commit, 0);
abc9b56d
SR
352}
353
474d32b6
SR
354/**
355 * ring_buffer_page_len - the size of data on the page.
356 * @page: The page to read
357 *
358 * Returns the amount of data on the page, including buffer page header.
359 */
ef7a4a16
SR
360size_t ring_buffer_page_len(void *page)
361{
474d32b6
SR
362 return local_read(&((struct buffer_data_page *)page)->commit)
363 + BUF_PAGE_HDR_SIZE;
ef7a4a16
SR
364}
365
ed56829c
SR
366/*
367 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
368 * this issue out.
369 */
34a148bf 370static void free_buffer_page(struct buffer_page *bpage)
ed56829c 371{
34a148bf 372 free_page((unsigned long)bpage->page);
e4c2ce82 373 kfree(bpage);
ed56829c
SR
374}
375
7a8e76a3
SR
376/*
377 * We need to fit the time_stamp delta into 27 bits.
378 */
379static inline int test_time_stamp(u64 delta)
380{
381 if (delta & TS_DELTA_TEST)
382 return 1;
383 return 0;
384}
385
474d32b6 386#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
7a8e76a3 387
be957c44
SR
388/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
389#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
390
ea05b57c
SR
391/* Max number of timestamps that can fit on a page */
392#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
393
d1b182a8
SR
394int ring_buffer_print_page_header(struct trace_seq *s)
395{
396 struct buffer_data_page field;
397 int ret;
398
399 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
400 "offset:0;\tsize:%u;\n",
401 (unsigned int)sizeof(field.time_stamp));
402
403 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
404 "offset:%u;\tsize:%u;\n",
405 (unsigned int)offsetof(typeof(field), commit),
406 (unsigned int)sizeof(field.commit));
407
408 ret = trace_seq_printf(s, "\tfield: char data;\t"
409 "offset:%u;\tsize:%u;\n",
410 (unsigned int)offsetof(typeof(field), data),
411 (unsigned int)BUF_PAGE_SIZE);
412
413 return ret;
414}
415
7a8e76a3
SR
416/*
417 * head_page == tail_page && head == tail then buffer is empty.
418 */
419struct ring_buffer_per_cpu {
420 int cpu;
421 struct ring_buffer *buffer;
77ae365e 422 spinlock_t reader_lock; /* serialize readers */
3e03fb7f 423 raw_spinlock_t lock;
7a8e76a3 424 struct lock_class_key lock_key;
3adc54fa 425 struct list_head *pages;
6f807acd
SR
426 struct buffer_page *head_page; /* read from head */
427 struct buffer_page *tail_page; /* write to tail */
c3706f00 428 struct buffer_page *commit_page; /* committed pages */
d769041f 429 struct buffer_page *reader_page;
77ae365e
SR
430 local_t commit_overrun;
431 local_t overrun;
e4906eff 432 local_t entries;
fa743953
SR
433 local_t committing;
434 local_t commits;
77ae365e 435 unsigned long read;
7a8e76a3
SR
436 u64 write_stamp;
437 u64 read_stamp;
438 atomic_t record_disabled;
439};
440
441struct ring_buffer {
7a8e76a3
SR
442 unsigned pages;
443 unsigned flags;
444 int cpus;
7a8e76a3 445 atomic_t record_disabled;
00f62f61 446 cpumask_var_t cpumask;
7a8e76a3 447
1f8a6a10
PZ
448 struct lock_class_key *reader_lock_key;
449
7a8e76a3
SR
450 struct mutex mutex;
451
452 struct ring_buffer_per_cpu **buffers;
554f786e 453
59222efe 454#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
455 struct notifier_block cpu_notify;
456#endif
37886f6a 457 u64 (*clock)(void);
7a8e76a3
SR
458};
459
460struct ring_buffer_iter {
461 struct ring_buffer_per_cpu *cpu_buffer;
462 unsigned long head;
463 struct buffer_page *head_page;
464 u64 read_stamp;
465};
466
f536aafc 467/* buffer may be either ring_buffer or ring_buffer_per_cpu */
077c5407
SR
468#define RB_WARN_ON(b, cond) \
469 ({ \
470 int _____ret = unlikely(cond); \
471 if (_____ret) { \
472 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
473 struct ring_buffer_per_cpu *__b = \
474 (void *)b; \
475 atomic_inc(&__b->buffer->record_disabled); \
476 } else \
477 atomic_inc(&b->record_disabled); \
478 WARN_ON(1); \
479 } \
480 _____ret; \
3e89c7bb 481 })
f536aafc 482
37886f6a
SR
483/* Up this if you want to test the TIME_EXTENTS and normalization */
484#define DEBUG_SHIFT 0
485
6d3f1e12 486static inline u64 rb_time_stamp(struct ring_buffer *buffer)
88eb0125
SR
487{
488 /* shift to debug/test normalization and TIME_EXTENTS */
489 return buffer->clock() << DEBUG_SHIFT;
490}
491
37886f6a
SR
492u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
493{
494 u64 time;
495
496 preempt_disable_notrace();
6d3f1e12 497 time = rb_time_stamp(buffer);
37886f6a
SR
498 preempt_enable_no_resched_notrace();
499
500 return time;
501}
502EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
503
504void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
505 int cpu, u64 *ts)
506{
507 /* Just stupid testing the normalize function and deltas */
508 *ts >>= DEBUG_SHIFT;
509}
510EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
511
77ae365e
SR
512/*
513 * Making the ring buffer lockless makes things tricky.
514 * Although writes only happen on the CPU that they are on,
515 * and they only need to worry about interrupts. Reads can
516 * happen on any CPU.
517 *
518 * The reader page is always off the ring buffer, but when the
519 * reader finishes with a page, it needs to swap its page with
520 * a new one from the buffer. The reader needs to take from
521 * the head (writes go to the tail). But if a writer is in overwrite
522 * mode and wraps, it must push the head page forward.
523 *
524 * Here lies the problem.
525 *
526 * The reader must be careful to replace only the head page, and
527 * not another one. As described at the top of the file in the
528 * ASCII art, the reader sets its old page to point to the next
529 * page after head. It then sets the page after head to point to
530 * the old reader page. But if the writer moves the head page
531 * during this operation, the reader could end up with the tail.
532 *
533 * We use cmpxchg to help prevent this race. We also do something
534 * special with the page before head. We set the LSB to 1.
535 *
536 * When the writer must push the page forward, it will clear the
537 * bit that points to the head page, move the head, and then set
538 * the bit that points to the new head page.
539 *
540 * We also don't want an interrupt coming in and moving the head
541 * page on another writer. Thus we use the second LSB to catch
542 * that too. Thus:
543 *
544 * head->list->prev->next bit 1 bit 0
545 * ------- -------
546 * Normal page 0 0
547 * Points to head page 0 1
548 * New head page 1 0
549 *
550 * Note we can not trust the prev pointer of the head page, because:
551 *
552 * +----+ +-----+ +-----+
553 * | |------>| T |---X--->| N |
554 * | |<------| | | |
555 * +----+ +-----+ +-----+
556 * ^ ^ |
557 * | +-----+ | |
558 * +----------| R |----------+ |
559 * | |<-----------+
560 * +-----+
561 *
562 * Key: ---X--> HEAD flag set in pointer
563 * T Tail page
564 * R Reader page
565 * N Next page
566 *
567 * (see __rb_reserve_next() to see where this happens)
568 *
569 * What the above shows is that the reader just swapped out
570 * the reader page with a page in the buffer, but before it
571 * could make the new header point back to the new page added
572 * it was preempted by a writer. The writer moved forward onto
573 * the new page added by the reader and is about to move forward
574 * again.
575 *
576 * You can see, it is legitimate for the previous pointer of
577 * the head (or any page) not to point back to itself. But only
578 * temporarially.
579 */
580
581#define RB_PAGE_NORMAL 0UL
582#define RB_PAGE_HEAD 1UL
583#define RB_PAGE_UPDATE 2UL
584
585
586#define RB_FLAG_MASK 3UL
587
588/* PAGE_MOVED is not part of the mask */
589#define RB_PAGE_MOVED 4UL
590
591/*
592 * rb_list_head - remove any bit
593 */
594static struct list_head *rb_list_head(struct list_head *list)
595{
596 unsigned long val = (unsigned long)list;
597
598 return (struct list_head *)(val & ~RB_FLAG_MASK);
599}
600
601/*
6d3f1e12 602 * rb_is_head_page - test if the given page is the head page
77ae365e
SR
603 *
604 * Because the reader may move the head_page pointer, we can
605 * not trust what the head page is (it may be pointing to
606 * the reader page). But if the next page is a header page,
607 * its flags will be non zero.
608 */
609static int inline
610rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
611 struct buffer_page *page, struct list_head *list)
612{
613 unsigned long val;
614
615 val = (unsigned long)list->next;
616
617 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
618 return RB_PAGE_MOVED;
619
620 return val & RB_FLAG_MASK;
621}
622
623/*
624 * rb_is_reader_page
625 *
626 * The unique thing about the reader page, is that, if the
627 * writer is ever on it, the previous pointer never points
628 * back to the reader page.
629 */
630static int rb_is_reader_page(struct buffer_page *page)
631{
632 struct list_head *list = page->list.prev;
633
634 return rb_list_head(list->next) != &page->list;
635}
636
637/*
638 * rb_set_list_to_head - set a list_head to be pointing to head.
639 */
640static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
641 struct list_head *list)
642{
643 unsigned long *ptr;
644
645 ptr = (unsigned long *)&list->next;
646 *ptr |= RB_PAGE_HEAD;
647 *ptr &= ~RB_PAGE_UPDATE;
648}
649
650/*
651 * rb_head_page_activate - sets up head page
652 */
653static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
654{
655 struct buffer_page *head;
656
657 head = cpu_buffer->head_page;
658 if (!head)
659 return;
660
661 /*
662 * Set the previous list pointer to have the HEAD flag.
663 */
664 rb_set_list_to_head(cpu_buffer, head->list.prev);
665}
666
667static void rb_list_head_clear(struct list_head *list)
668{
669 unsigned long *ptr = (unsigned long *)&list->next;
670
671 *ptr &= ~RB_FLAG_MASK;
672}
673
674/*
675 * rb_head_page_dactivate - clears head page ptr (for free list)
676 */
677static void
678rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
679{
680 struct list_head *hd;
681
682 /* Go through the whole list and clear any pointers found. */
683 rb_list_head_clear(cpu_buffer->pages);
684
685 list_for_each(hd, cpu_buffer->pages)
686 rb_list_head_clear(hd);
687}
688
689static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
690 struct buffer_page *head,
691 struct buffer_page *prev,
692 int old_flag, int new_flag)
693{
694 struct list_head *list;
695 unsigned long val = (unsigned long)&head->list;
696 unsigned long ret;
697
698 list = &prev->list;
699
700 val &= ~RB_FLAG_MASK;
701
08a40816
SR
702 ret = cmpxchg((unsigned long *)&list->next,
703 val | old_flag, val | new_flag);
77ae365e
SR
704
705 /* check if the reader took the page */
706 if ((ret & ~RB_FLAG_MASK) != val)
707 return RB_PAGE_MOVED;
708
709 return ret & RB_FLAG_MASK;
710}
711
712static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
713 struct buffer_page *head,
714 struct buffer_page *prev,
715 int old_flag)
716{
717 return rb_head_page_set(cpu_buffer, head, prev,
718 old_flag, RB_PAGE_UPDATE);
719}
720
721static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
722 struct buffer_page *head,
723 struct buffer_page *prev,
724 int old_flag)
725{
726 return rb_head_page_set(cpu_buffer, head, prev,
727 old_flag, RB_PAGE_HEAD);
728}
729
730static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
731 struct buffer_page *head,
732 struct buffer_page *prev,
733 int old_flag)
734{
735 return rb_head_page_set(cpu_buffer, head, prev,
736 old_flag, RB_PAGE_NORMAL);
737}
738
739static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
740 struct buffer_page **bpage)
741{
742 struct list_head *p = rb_list_head((*bpage)->list.next);
743
744 *bpage = list_entry(p, struct buffer_page, list);
745}
746
747static struct buffer_page *
748rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
749{
750 struct buffer_page *head;
751 struct buffer_page *page;
752 struct list_head *list;
753 int i;
754
755 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
756 return NULL;
757
758 /* sanity check */
759 list = cpu_buffer->pages;
760 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
761 return NULL;
762
763 page = head = cpu_buffer->head_page;
764 /*
765 * It is possible that the writer moves the header behind
766 * where we started, and we miss in one loop.
767 * A second loop should grab the header, but we'll do
768 * three loops just because I'm paranoid.
769 */
770 for (i = 0; i < 3; i++) {
771 do {
772 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
773 cpu_buffer->head_page = page;
774 return page;
775 }
776 rb_inc_page(cpu_buffer, &page);
777 } while (page != head);
778 }
779
780 RB_WARN_ON(cpu_buffer, 1);
781
782 return NULL;
783}
784
785static int rb_head_page_replace(struct buffer_page *old,
786 struct buffer_page *new)
787{
788 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
789 unsigned long val;
790 unsigned long ret;
791
792 val = *ptr & ~RB_FLAG_MASK;
793 val |= RB_PAGE_HEAD;
794
08a40816 795 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
77ae365e
SR
796
797 return ret == val;
798}
799
800/*
801 * rb_tail_page_update - move the tail page forward
802 *
803 * Returns 1 if moved tail page, 0 if someone else did.
804 */
805static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
806 struct buffer_page *tail_page,
807 struct buffer_page *next_page)
808{
809 struct buffer_page *old_tail;
810 unsigned long old_entries;
811 unsigned long old_write;
812 int ret = 0;
813
814 /*
815 * The tail page now needs to be moved forward.
816 *
817 * We need to reset the tail page, but without messing
818 * with possible erasing of data brought in by interrupts
819 * that have moved the tail page and are currently on it.
820 *
821 * We add a counter to the write field to denote this.
822 */
823 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
824 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
825
826 /*
827 * Just make sure we have seen our old_write and synchronize
828 * with any interrupts that come in.
829 */
830 barrier();
831
832 /*
833 * If the tail page is still the same as what we think
834 * it is, then it is up to us to update the tail
835 * pointer.
836 */
837 if (tail_page == cpu_buffer->tail_page) {
838 /* Zero the write counter */
839 unsigned long val = old_write & ~RB_WRITE_MASK;
840 unsigned long eval = old_entries & ~RB_WRITE_MASK;
841
842 /*
843 * This will only succeed if an interrupt did
844 * not come in and change it. In which case, we
845 * do not want to modify it.
da706d8b
LJ
846 *
847 * We add (void) to let the compiler know that we do not care
848 * about the return value of these functions. We use the
849 * cmpxchg to only update if an interrupt did not already
850 * do it for us. If the cmpxchg fails, we don't care.
77ae365e 851 */
da706d8b
LJ
852 (void)local_cmpxchg(&next_page->write, old_write, val);
853 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
77ae365e
SR
854
855 /*
856 * No need to worry about races with clearing out the commit.
857 * it only can increment when a commit takes place. But that
858 * only happens in the outer most nested commit.
859 */
860 local_set(&next_page->page->commit, 0);
861
862 old_tail = cmpxchg(&cpu_buffer->tail_page,
863 tail_page, next_page);
864
865 if (old_tail == tail_page)
866 ret = 1;
867 }
868
869 return ret;
870}
871
872static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
873 struct buffer_page *bpage)
874{
875 unsigned long val = (unsigned long)bpage;
876
877 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
878 return 1;
879
880 return 0;
881}
882
883/**
884 * rb_check_list - make sure a pointer to a list has the last bits zero
885 */
886static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
887 struct list_head *list)
888{
889 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
890 return 1;
891 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
892 return 1;
893 return 0;
894}
895
7a8e76a3
SR
896/**
897 * check_pages - integrity check of buffer pages
898 * @cpu_buffer: CPU buffer with pages to test
899 *
c3706f00 900 * As a safety measure we check to make sure the data pages have not
7a8e76a3
SR
901 * been corrupted.
902 */
903static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
904{
3adc54fa 905 struct list_head *head = cpu_buffer->pages;
044fa782 906 struct buffer_page *bpage, *tmp;
7a8e76a3 907
77ae365e
SR
908 rb_head_page_deactivate(cpu_buffer);
909
3e89c7bb
SR
910 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
911 return -1;
912 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
913 return -1;
7a8e76a3 914
77ae365e
SR
915 if (rb_check_list(cpu_buffer, head))
916 return -1;
917
044fa782 918 list_for_each_entry_safe(bpage, tmp, head, list) {
3e89c7bb 919 if (RB_WARN_ON(cpu_buffer,
044fa782 920 bpage->list.next->prev != &bpage->list))
3e89c7bb
SR
921 return -1;
922 if (RB_WARN_ON(cpu_buffer,
044fa782 923 bpage->list.prev->next != &bpage->list))
3e89c7bb 924 return -1;
77ae365e
SR
925 if (rb_check_list(cpu_buffer, &bpage->list))
926 return -1;
7a8e76a3
SR
927 }
928
77ae365e
SR
929 rb_head_page_activate(cpu_buffer);
930
7a8e76a3
SR
931 return 0;
932}
933
7a8e76a3
SR
934static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
935 unsigned nr_pages)
936{
044fa782 937 struct buffer_page *bpage, *tmp;
7a8e76a3
SR
938 unsigned long addr;
939 LIST_HEAD(pages);
940 unsigned i;
941
3adc54fa
SR
942 WARN_ON(!nr_pages);
943
7a8e76a3 944 for (i = 0; i < nr_pages; i++) {
044fa782 945 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
aa1e0e3b 946 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
044fa782 947 if (!bpage)
e4c2ce82 948 goto free_pages;
77ae365e
SR
949
950 rb_check_bpage(cpu_buffer, bpage);
951
044fa782 952 list_add(&bpage->list, &pages);
e4c2ce82 953
7a8e76a3
SR
954 addr = __get_free_page(GFP_KERNEL);
955 if (!addr)
956 goto free_pages;
044fa782
SR
957 bpage->page = (void *)addr;
958 rb_init_page(bpage->page);
7a8e76a3
SR
959 }
960
3adc54fa
SR
961 /*
962 * The ring buffer page list is a circular list that does not
963 * start and end with a list head. All page list items point to
964 * other pages.
965 */
966 cpu_buffer->pages = pages.next;
967 list_del(&pages);
7a8e76a3
SR
968
969 rb_check_pages(cpu_buffer);
970
971 return 0;
972
973 free_pages:
044fa782
SR
974 list_for_each_entry_safe(bpage, tmp, &pages, list) {
975 list_del_init(&bpage->list);
976 free_buffer_page(bpage);
7a8e76a3
SR
977 }
978 return -ENOMEM;
979}
980
981static struct ring_buffer_per_cpu *
982rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
983{
984 struct ring_buffer_per_cpu *cpu_buffer;
044fa782 985 struct buffer_page *bpage;
d769041f 986 unsigned long addr;
7a8e76a3
SR
987 int ret;
988
989 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
990 GFP_KERNEL, cpu_to_node(cpu));
991 if (!cpu_buffer)
992 return NULL;
993
994 cpu_buffer->cpu = cpu;
995 cpu_buffer->buffer = buffer;
f83c9d0f 996 spin_lock_init(&cpu_buffer->reader_lock);
1f8a6a10 997 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
3e03fb7f 998 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
7a8e76a3 999
044fa782 1000 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
e4c2ce82 1001 GFP_KERNEL, cpu_to_node(cpu));
044fa782 1002 if (!bpage)
e4c2ce82
SR
1003 goto fail_free_buffer;
1004
77ae365e
SR
1005 rb_check_bpage(cpu_buffer, bpage);
1006
044fa782 1007 cpu_buffer->reader_page = bpage;
d769041f
SR
1008 addr = __get_free_page(GFP_KERNEL);
1009 if (!addr)
e4c2ce82 1010 goto fail_free_reader;
044fa782
SR
1011 bpage->page = (void *)addr;
1012 rb_init_page(bpage->page);
e4c2ce82 1013
d769041f 1014 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
d769041f 1015
7a8e76a3
SR
1016 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1017 if (ret < 0)
d769041f 1018 goto fail_free_reader;
7a8e76a3
SR
1019
1020 cpu_buffer->head_page
3adc54fa 1021 = list_entry(cpu_buffer->pages, struct buffer_page, list);
bf41a158 1022 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
7a8e76a3 1023
77ae365e
SR
1024 rb_head_page_activate(cpu_buffer);
1025
7a8e76a3
SR
1026 return cpu_buffer;
1027
d769041f
SR
1028 fail_free_reader:
1029 free_buffer_page(cpu_buffer->reader_page);
1030
7a8e76a3
SR
1031 fail_free_buffer:
1032 kfree(cpu_buffer);
1033 return NULL;
1034}
1035
1036static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1037{
3adc54fa 1038 struct list_head *head = cpu_buffer->pages;
044fa782 1039 struct buffer_page *bpage, *tmp;
7a8e76a3 1040
d769041f
SR
1041 free_buffer_page(cpu_buffer->reader_page);
1042
77ae365e
SR
1043 rb_head_page_deactivate(cpu_buffer);
1044
3adc54fa
SR
1045 if (head) {
1046 list_for_each_entry_safe(bpage, tmp, head, list) {
1047 list_del_init(&bpage->list);
1048 free_buffer_page(bpage);
1049 }
1050 bpage = list_entry(head, struct buffer_page, list);
044fa782 1051 free_buffer_page(bpage);
7a8e76a3 1052 }
3adc54fa 1053
7a8e76a3
SR
1054 kfree(cpu_buffer);
1055}
1056
59222efe 1057#ifdef CONFIG_HOTPLUG_CPU
09c9e84d
FW
1058static int rb_cpu_notify(struct notifier_block *self,
1059 unsigned long action, void *hcpu);
554f786e
SR
1060#endif
1061
7a8e76a3
SR
1062/**
1063 * ring_buffer_alloc - allocate a new ring_buffer
68814b58 1064 * @size: the size in bytes per cpu that is needed.
7a8e76a3
SR
1065 * @flags: attributes to set for the ring buffer.
1066 *
1067 * Currently the only flag that is available is the RB_FL_OVERWRITE
1068 * flag. This flag means that the buffer will overwrite old data
1069 * when the buffer wraps. If this flag is not set, the buffer will
1070 * drop data when the tail hits the head.
1071 */
1f8a6a10
PZ
1072struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1073 struct lock_class_key *key)
7a8e76a3
SR
1074{
1075 struct ring_buffer *buffer;
1076 int bsize;
1077 int cpu;
1078
1079 /* keep it in its own cache line */
1080 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1081 GFP_KERNEL);
1082 if (!buffer)
1083 return NULL;
1084
9e01c1b7
RR
1085 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1086 goto fail_free_buffer;
1087
7a8e76a3
SR
1088 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1089 buffer->flags = flags;
37886f6a 1090 buffer->clock = trace_clock_local;
1f8a6a10 1091 buffer->reader_lock_key = key;
7a8e76a3
SR
1092
1093 /* need at least two pages */
5f78abee
SR
1094 if (buffer->pages < 2)
1095 buffer->pages = 2;
7a8e76a3 1096
3bf832ce
FW
1097 /*
1098 * In case of non-hotplug cpu, if the ring-buffer is allocated
1099 * in early initcall, it will not be notified of secondary cpus.
1100 * In that off case, we need to allocate for all possible cpus.
1101 */
1102#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
1103 get_online_cpus();
1104 cpumask_copy(buffer->cpumask, cpu_online_mask);
3bf832ce
FW
1105#else
1106 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1107#endif
7a8e76a3
SR
1108 buffer->cpus = nr_cpu_ids;
1109
1110 bsize = sizeof(void *) * nr_cpu_ids;
1111 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1112 GFP_KERNEL);
1113 if (!buffer->buffers)
9e01c1b7 1114 goto fail_free_cpumask;
7a8e76a3
SR
1115
1116 for_each_buffer_cpu(buffer, cpu) {
1117 buffer->buffers[cpu] =
1118 rb_allocate_cpu_buffer(buffer, cpu);
1119 if (!buffer->buffers[cpu])
1120 goto fail_free_buffers;
1121 }
1122
59222efe 1123#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
1124 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1125 buffer->cpu_notify.priority = 0;
1126 register_cpu_notifier(&buffer->cpu_notify);
1127#endif
1128
1129 put_online_cpus();
7a8e76a3
SR
1130 mutex_init(&buffer->mutex);
1131
1132 return buffer;
1133
1134 fail_free_buffers:
1135 for_each_buffer_cpu(buffer, cpu) {
1136 if (buffer->buffers[cpu])
1137 rb_free_cpu_buffer(buffer->buffers[cpu]);
1138 }
1139 kfree(buffer->buffers);
1140
9e01c1b7
RR
1141 fail_free_cpumask:
1142 free_cpumask_var(buffer->cpumask);
554f786e 1143 put_online_cpus();
9e01c1b7 1144
7a8e76a3
SR
1145 fail_free_buffer:
1146 kfree(buffer);
1147 return NULL;
1148}
1f8a6a10 1149EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
7a8e76a3
SR
1150
1151/**
1152 * ring_buffer_free - free a ring buffer.
1153 * @buffer: the buffer to free.
1154 */
1155void
1156ring_buffer_free(struct ring_buffer *buffer)
1157{
1158 int cpu;
1159
554f786e
SR
1160 get_online_cpus();
1161
59222efe 1162#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
1163 unregister_cpu_notifier(&buffer->cpu_notify);
1164#endif
1165
7a8e76a3
SR
1166 for_each_buffer_cpu(buffer, cpu)
1167 rb_free_cpu_buffer(buffer->buffers[cpu]);
1168
554f786e
SR
1169 put_online_cpus();
1170
bd3f0221 1171 kfree(buffer->buffers);
9e01c1b7
RR
1172 free_cpumask_var(buffer->cpumask);
1173
7a8e76a3
SR
1174 kfree(buffer);
1175}
c4f50183 1176EXPORT_SYMBOL_GPL(ring_buffer_free);
7a8e76a3 1177
37886f6a
SR
1178void ring_buffer_set_clock(struct ring_buffer *buffer,
1179 u64 (*clock)(void))
1180{
1181 buffer->clock = clock;
1182}
1183
7a8e76a3
SR
1184static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1185
1186static void
1187rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1188{
044fa782 1189 struct buffer_page *bpage;
7a8e76a3
SR
1190 struct list_head *p;
1191 unsigned i;
1192
1193 atomic_inc(&cpu_buffer->record_disabled);
1194 synchronize_sched();
1195
77ae365e
SR
1196 rb_head_page_deactivate(cpu_buffer);
1197
7a8e76a3 1198 for (i = 0; i < nr_pages; i++) {
3adc54fa 1199 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
3e89c7bb 1200 return;
3adc54fa 1201 p = cpu_buffer->pages->next;
044fa782
SR
1202 bpage = list_entry(p, struct buffer_page, list);
1203 list_del_init(&bpage->list);
1204 free_buffer_page(bpage);
7a8e76a3 1205 }
3adc54fa 1206 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
3e89c7bb 1207 return;
7a8e76a3
SR
1208
1209 rb_reset_cpu(cpu_buffer);
1210
1211 rb_check_pages(cpu_buffer);
1212
1213 atomic_dec(&cpu_buffer->record_disabled);
1214
1215}
1216
1217static void
1218rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1219 struct list_head *pages, unsigned nr_pages)
1220{
044fa782 1221 struct buffer_page *bpage;
7a8e76a3
SR
1222 struct list_head *p;
1223 unsigned i;
1224
1225 atomic_inc(&cpu_buffer->record_disabled);
1226 synchronize_sched();
1227
77ae365e
SR
1228 spin_lock_irq(&cpu_buffer->reader_lock);
1229 rb_head_page_deactivate(cpu_buffer);
1230
7a8e76a3 1231 for (i = 0; i < nr_pages; i++) {
3e89c7bb
SR
1232 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1233 return;
7a8e76a3 1234 p = pages->next;
044fa782
SR
1235 bpage = list_entry(p, struct buffer_page, list);
1236 list_del_init(&bpage->list);
3adc54fa 1237 list_add_tail(&bpage->list, cpu_buffer->pages);
7a8e76a3
SR
1238 }
1239 rb_reset_cpu(cpu_buffer);
77ae365e 1240 spin_unlock_irq(&cpu_buffer->reader_lock);
7a8e76a3
SR
1241
1242 rb_check_pages(cpu_buffer);
1243
1244 atomic_dec(&cpu_buffer->record_disabled);
1245}
1246
1247/**
1248 * ring_buffer_resize - resize the ring buffer
1249 * @buffer: the buffer to resize.
1250 * @size: the new size.
1251 *
1252 * The tracer is responsible for making sure that the buffer is
1253 * not being used while changing the size.
1254 * Note: We may be able to change the above requirement by using
1255 * RCU synchronizations.
1256 *
1257 * Minimum size is 2 * BUF_PAGE_SIZE.
1258 *
1259 * Returns -1 on failure.
1260 */
1261int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1262{
1263 struct ring_buffer_per_cpu *cpu_buffer;
1264 unsigned nr_pages, rm_pages, new_pages;
044fa782 1265 struct buffer_page *bpage, *tmp;
7a8e76a3
SR
1266 unsigned long buffer_size;
1267 unsigned long addr;
1268 LIST_HEAD(pages);
1269 int i, cpu;
1270
ee51a1de
IM
1271 /*
1272 * Always succeed at resizing a non-existent buffer:
1273 */
1274 if (!buffer)
1275 return size;
1276
7a8e76a3
SR
1277 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1278 size *= BUF_PAGE_SIZE;
1279 buffer_size = buffer->pages * BUF_PAGE_SIZE;
1280
1281 /* we need a minimum of two pages */
1282 if (size < BUF_PAGE_SIZE * 2)
1283 size = BUF_PAGE_SIZE * 2;
1284
1285 if (size == buffer_size)
1286 return size;
1287
1288 mutex_lock(&buffer->mutex);
554f786e 1289 get_online_cpus();
7a8e76a3
SR
1290
1291 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1292
1293 if (size < buffer_size) {
1294
1295 /* easy case, just free pages */
554f786e
SR
1296 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1297 goto out_fail;
7a8e76a3
SR
1298
1299 rm_pages = buffer->pages - nr_pages;
1300
1301 for_each_buffer_cpu(buffer, cpu) {
1302 cpu_buffer = buffer->buffers[cpu];
1303 rb_remove_pages(cpu_buffer, rm_pages);
1304 }
1305 goto out;
1306 }
1307
1308 /*
1309 * This is a bit more difficult. We only want to add pages
1310 * when we can allocate enough for all CPUs. We do this
1311 * by allocating all the pages and storing them on a local
1312 * link list. If we succeed in our allocation, then we
1313 * add these pages to the cpu_buffers. Otherwise we just free
1314 * them all and return -ENOMEM;
1315 */
554f786e
SR
1316 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1317 goto out_fail;
f536aafc 1318
7a8e76a3
SR
1319 new_pages = nr_pages - buffer->pages;
1320
1321 for_each_buffer_cpu(buffer, cpu) {
1322 for (i = 0; i < new_pages; i++) {
044fa782 1323 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
e4c2ce82
SR
1324 cache_line_size()),
1325 GFP_KERNEL, cpu_to_node(cpu));
044fa782 1326 if (!bpage)
e4c2ce82 1327 goto free_pages;
044fa782 1328 list_add(&bpage->list, &pages);
7a8e76a3
SR
1329 addr = __get_free_page(GFP_KERNEL);
1330 if (!addr)
1331 goto free_pages;
044fa782
SR
1332 bpage->page = (void *)addr;
1333 rb_init_page(bpage->page);
7a8e76a3
SR
1334 }
1335 }
1336
1337 for_each_buffer_cpu(buffer, cpu) {
1338 cpu_buffer = buffer->buffers[cpu];
1339 rb_insert_pages(cpu_buffer, &pages, new_pages);
1340 }
1341
554f786e
SR
1342 if (RB_WARN_ON(buffer, !list_empty(&pages)))
1343 goto out_fail;
7a8e76a3
SR
1344
1345 out:
1346 buffer->pages = nr_pages;
554f786e 1347 put_online_cpus();
7a8e76a3
SR
1348 mutex_unlock(&buffer->mutex);
1349
1350 return size;
1351
1352 free_pages:
044fa782
SR
1353 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1354 list_del_init(&bpage->list);
1355 free_buffer_page(bpage);
7a8e76a3 1356 }
554f786e 1357 put_online_cpus();
641d2f63 1358 mutex_unlock(&buffer->mutex);
7a8e76a3 1359 return -ENOMEM;
554f786e
SR
1360
1361 /*
1362 * Something went totally wrong, and we are too paranoid
1363 * to even clean up the mess.
1364 */
1365 out_fail:
1366 put_online_cpus();
1367 mutex_unlock(&buffer->mutex);
1368 return -1;
7a8e76a3 1369}
c4f50183 1370EXPORT_SYMBOL_GPL(ring_buffer_resize);
7a8e76a3 1371
8789a9e7 1372static inline void *
044fa782 1373__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
8789a9e7 1374{
044fa782 1375 return bpage->data + index;
8789a9e7
SR
1376}
1377
044fa782 1378static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
7a8e76a3 1379{
044fa782 1380 return bpage->page->data + index;
7a8e76a3
SR
1381}
1382
1383static inline struct ring_buffer_event *
d769041f 1384rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1385{
6f807acd
SR
1386 return __rb_page_index(cpu_buffer->reader_page,
1387 cpu_buffer->reader_page->read);
1388}
1389
7a8e76a3
SR
1390static inline struct ring_buffer_event *
1391rb_iter_head_event(struct ring_buffer_iter *iter)
1392{
6f807acd 1393 return __rb_page_index(iter->head_page, iter->head);
7a8e76a3
SR
1394}
1395
77ae365e 1396static inline unsigned long rb_page_write(struct buffer_page *bpage)
bf41a158 1397{
77ae365e 1398 return local_read(&bpage->write) & RB_WRITE_MASK;
bf41a158
SR
1399}
1400
1401static inline unsigned rb_page_commit(struct buffer_page *bpage)
1402{
abc9b56d 1403 return local_read(&bpage->page->commit);
bf41a158
SR
1404}
1405
77ae365e
SR
1406static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1407{
1408 return local_read(&bpage->entries) & RB_WRITE_MASK;
1409}
1410
bf41a158
SR
1411/* Size is determined by what has been commited */
1412static inline unsigned rb_page_size(struct buffer_page *bpage)
1413{
1414 return rb_page_commit(bpage);
1415}
1416
1417static inline unsigned
1418rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1419{
1420 return rb_page_commit(cpu_buffer->commit_page);
1421}
1422
bf41a158
SR
1423static inline unsigned
1424rb_event_index(struct ring_buffer_event *event)
1425{
1426 unsigned long addr = (unsigned long)event;
1427
22f470f8 1428 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
bf41a158
SR
1429}
1430
0f0c85fc 1431static inline int
fa743953
SR
1432rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1433 struct ring_buffer_event *event)
bf41a158
SR
1434{
1435 unsigned long addr = (unsigned long)event;
1436 unsigned long index;
1437
1438 index = rb_event_index(event);
1439 addr &= PAGE_MASK;
1440
1441 return cpu_buffer->commit_page->page == (void *)addr &&
1442 rb_commit_index(cpu_buffer) == index;
1443}
1444
34a148bf 1445static void
bf41a158 1446rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1447{
77ae365e
SR
1448 unsigned long max_count;
1449
bf41a158
SR
1450 /*
1451 * We only race with interrupts and NMIs on this CPU.
1452 * If we own the commit event, then we can commit
1453 * all others that interrupted us, since the interruptions
1454 * are in stack format (they finish before they come
1455 * back to us). This allows us to do a simple loop to
1456 * assign the commit to the tail.
1457 */
a8ccf1d6 1458 again:
77ae365e
SR
1459 max_count = cpu_buffer->buffer->pages * 100;
1460
bf41a158 1461 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
77ae365e
SR
1462 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1463 return;
1464 if (RB_WARN_ON(cpu_buffer,
1465 rb_is_reader_page(cpu_buffer->tail_page)))
1466 return;
1467 local_set(&cpu_buffer->commit_page->page->commit,
1468 rb_page_write(cpu_buffer->commit_page));
bf41a158 1469 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
abc9b56d
SR
1470 cpu_buffer->write_stamp =
1471 cpu_buffer->commit_page->page->time_stamp;
bf41a158
SR
1472 /* add barrier to keep gcc from optimizing too much */
1473 barrier();
1474 }
1475 while (rb_commit_index(cpu_buffer) !=
1476 rb_page_write(cpu_buffer->commit_page)) {
77ae365e
SR
1477
1478 local_set(&cpu_buffer->commit_page->page->commit,
1479 rb_page_write(cpu_buffer->commit_page));
1480 RB_WARN_ON(cpu_buffer,
1481 local_read(&cpu_buffer->commit_page->page->commit) &
1482 ~RB_WRITE_MASK);
bf41a158
SR
1483 barrier();
1484 }
a8ccf1d6
SR
1485
1486 /* again, keep gcc from optimizing */
1487 barrier();
1488
1489 /*
1490 * If an interrupt came in just after the first while loop
1491 * and pushed the tail page forward, we will be left with
1492 * a dangling commit that will never go forward.
1493 */
1494 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1495 goto again;
7a8e76a3
SR
1496}
1497
d769041f 1498static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1499{
abc9b56d 1500 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
6f807acd 1501 cpu_buffer->reader_page->read = 0;
d769041f
SR
1502}
1503
34a148bf 1504static void rb_inc_iter(struct ring_buffer_iter *iter)
d769041f
SR
1505{
1506 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1507
1508 /*
1509 * The iterator could be on the reader page (it starts there).
1510 * But the head could have moved, since the reader was
1511 * found. Check for this case and assign the iterator
1512 * to the head page instead of next.
1513 */
1514 if (iter->head_page == cpu_buffer->reader_page)
77ae365e 1515 iter->head_page = rb_set_head_page(cpu_buffer);
d769041f
SR
1516 else
1517 rb_inc_page(cpu_buffer, &iter->head_page);
1518
abc9b56d 1519 iter->read_stamp = iter->head_page->page->time_stamp;
7a8e76a3
SR
1520 iter->head = 0;
1521}
1522
1523/**
1524 * ring_buffer_update_event - update event type and data
1525 * @event: the even to update
1526 * @type: the type of event
1527 * @length: the size of the event field in the ring buffer
1528 *
1529 * Update the type and data fields of the event. The length
1530 * is the actual size that is written to the ring buffer,
1531 * and with this, we can determine what to place into the
1532 * data field.
1533 */
34a148bf 1534static void
7a8e76a3
SR
1535rb_update_event(struct ring_buffer_event *event,
1536 unsigned type, unsigned length)
1537{
334d4169 1538 event->type_len = type;
7a8e76a3
SR
1539
1540 switch (type) {
1541
1542 case RINGBUF_TYPE_PADDING:
7a8e76a3 1543 case RINGBUF_TYPE_TIME_EXTEND:
7a8e76a3 1544 case RINGBUF_TYPE_TIME_STAMP:
7a8e76a3
SR
1545 break;
1546
334d4169 1547 case 0:
7a8e76a3 1548 length -= RB_EVNT_HDR_SIZE;
334d4169 1549 if (length > RB_MAX_SMALL_DATA)
7a8e76a3 1550 event->array[0] = length;
334d4169
LJ
1551 else
1552 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
7a8e76a3
SR
1553 break;
1554 default:
1555 BUG();
1556 }
1557}
1558
77ae365e
SR
1559/*
1560 * rb_handle_head_page - writer hit the head page
1561 *
1562 * Returns: +1 to retry page
1563 * 0 to continue
1564 * -1 on error
1565 */
1566static int
1567rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1568 struct buffer_page *tail_page,
1569 struct buffer_page *next_page)
1570{
1571 struct buffer_page *new_head;
1572 int entries;
1573 int type;
1574 int ret;
1575
1576 entries = rb_page_entries(next_page);
1577
1578 /*
1579 * The hard part is here. We need to move the head
1580 * forward, and protect against both readers on
1581 * other CPUs and writers coming in via interrupts.
1582 */
1583 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1584 RB_PAGE_HEAD);
1585
1586 /*
1587 * type can be one of four:
1588 * NORMAL - an interrupt already moved it for us
1589 * HEAD - we are the first to get here.
1590 * UPDATE - we are the interrupt interrupting
1591 * a current move.
1592 * MOVED - a reader on another CPU moved the next
1593 * pointer to its reader page. Give up
1594 * and try again.
1595 */
1596
1597 switch (type) {
1598 case RB_PAGE_HEAD:
1599 /*
1600 * We changed the head to UPDATE, thus
1601 * it is our responsibility to update
1602 * the counters.
1603 */
1604 local_add(entries, &cpu_buffer->overrun);
1605
1606 /*
1607 * The entries will be zeroed out when we move the
1608 * tail page.
1609 */
1610
1611 /* still more to do */
1612 break;
1613
1614 case RB_PAGE_UPDATE:
1615 /*
1616 * This is an interrupt that interrupt the
1617 * previous update. Still more to do.
1618 */
1619 break;
1620 case RB_PAGE_NORMAL:
1621 /*
1622 * An interrupt came in before the update
1623 * and processed this for us.
1624 * Nothing left to do.
1625 */
1626 return 1;
1627 case RB_PAGE_MOVED:
1628 /*
1629 * The reader is on another CPU and just did
1630 * a swap with our next_page.
1631 * Try again.
1632 */
1633 return 1;
1634 default:
1635 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1636 return -1;
1637 }
1638
1639 /*
1640 * Now that we are here, the old head pointer is
1641 * set to UPDATE. This will keep the reader from
1642 * swapping the head page with the reader page.
1643 * The reader (on another CPU) will spin till
1644 * we are finished.
1645 *
1646 * We just need to protect against interrupts
1647 * doing the job. We will set the next pointer
1648 * to HEAD. After that, we set the old pointer
1649 * to NORMAL, but only if it was HEAD before.
1650 * otherwise we are an interrupt, and only
1651 * want the outer most commit to reset it.
1652 */
1653 new_head = next_page;
1654 rb_inc_page(cpu_buffer, &new_head);
1655
1656 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1657 RB_PAGE_NORMAL);
1658
1659 /*
1660 * Valid returns are:
1661 * HEAD - an interrupt came in and already set it.
1662 * NORMAL - One of two things:
1663 * 1) We really set it.
1664 * 2) A bunch of interrupts came in and moved
1665 * the page forward again.
1666 */
1667 switch (ret) {
1668 case RB_PAGE_HEAD:
1669 case RB_PAGE_NORMAL:
1670 /* OK */
1671 break;
1672 default:
1673 RB_WARN_ON(cpu_buffer, 1);
1674 return -1;
1675 }
1676
1677 /*
1678 * It is possible that an interrupt came in,
1679 * set the head up, then more interrupts came in
1680 * and moved it again. When we get back here,
1681 * the page would have been set to NORMAL but we
1682 * just set it back to HEAD.
1683 *
1684 * How do you detect this? Well, if that happened
1685 * the tail page would have moved.
1686 */
1687 if (ret == RB_PAGE_NORMAL) {
1688 /*
1689 * If the tail had moved passed next, then we need
1690 * to reset the pointer.
1691 */
1692 if (cpu_buffer->tail_page != tail_page &&
1693 cpu_buffer->tail_page != next_page)
1694 rb_head_page_set_normal(cpu_buffer, new_head,
1695 next_page,
1696 RB_PAGE_HEAD);
1697 }
1698
1699 /*
1700 * If this was the outer most commit (the one that
1701 * changed the original pointer from HEAD to UPDATE),
1702 * then it is up to us to reset it to NORMAL.
1703 */
1704 if (type == RB_PAGE_HEAD) {
1705 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1706 tail_page,
1707 RB_PAGE_UPDATE);
1708 if (RB_WARN_ON(cpu_buffer,
1709 ret != RB_PAGE_UPDATE))
1710 return -1;
1711 }
1712
1713 return 0;
1714}
1715
34a148bf 1716static unsigned rb_calculate_event_length(unsigned length)
7a8e76a3
SR
1717{
1718 struct ring_buffer_event event; /* Used only for sizeof array */
1719
1720 /* zero length can cause confusions */
1721 if (!length)
1722 length = 1;
1723
1724 if (length > RB_MAX_SMALL_DATA)
1725 length += sizeof(event.array[0]);
1726
1727 length += RB_EVNT_HDR_SIZE;
1728 length = ALIGN(length, RB_ALIGNMENT);
1729
1730 return length;
1731}
1732
c7b09308
SR
1733static inline void
1734rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1735 struct buffer_page *tail_page,
1736 unsigned long tail, unsigned long length)
1737{
1738 struct ring_buffer_event *event;
1739
1740 /*
1741 * Only the event that crossed the page boundary
1742 * must fill the old tail_page with padding.
1743 */
1744 if (tail >= BUF_PAGE_SIZE) {
1745 local_sub(length, &tail_page->write);
1746 return;
1747 }
1748
1749 event = __rb_page_index(tail_page, tail);
b0b7065b 1750 kmemcheck_annotate_bitfield(event, bitfield);
c7b09308
SR
1751
1752 /*
1753 * If this event is bigger than the minimum size, then
1754 * we need to be careful that we don't subtract the
1755 * write counter enough to allow another writer to slip
1756 * in on this page.
1757 * We put in a discarded commit instead, to make sure
1758 * that this space is not used again.
1759 *
1760 * If we are less than the minimum size, we don't need to
1761 * worry about it.
1762 */
1763 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1764 /* No room for any events */
1765
1766 /* Mark the rest of the page with padding */
1767 rb_event_set_padding(event);
1768
1769 /* Set the write back to the previous setting */
1770 local_sub(length, &tail_page->write);
1771 return;
1772 }
1773
1774 /* Put in a discarded event */
1775 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1776 event->type_len = RINGBUF_TYPE_PADDING;
1777 /* time delta must be non zero */
1778 event->time_delta = 1;
c7b09308
SR
1779
1780 /* Set write to end of buffer */
1781 length = (tail + length) - BUF_PAGE_SIZE;
1782 local_sub(length, &tail_page->write);
1783}
6634ff26 1784
7a8e76a3 1785static struct ring_buffer_event *
6634ff26
SR
1786rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1787 unsigned long length, unsigned long tail,
1788 struct buffer_page *commit_page,
1789 struct buffer_page *tail_page, u64 *ts)
7a8e76a3 1790{
7a8e76a3 1791 struct ring_buffer *buffer = cpu_buffer->buffer;
77ae365e
SR
1792 struct buffer_page *next_page;
1793 int ret;
aa20ae84
SR
1794
1795 next_page = tail_page;
1796
aa20ae84
SR
1797 rb_inc_page(cpu_buffer, &next_page);
1798
aa20ae84
SR
1799 /*
1800 * If for some reason, we had an interrupt storm that made
1801 * it all the way around the buffer, bail, and warn
1802 * about it.
1803 */
1804 if (unlikely(next_page == commit_page)) {
77ae365e 1805 local_inc(&cpu_buffer->commit_overrun);
aa20ae84
SR
1806 goto out_reset;
1807 }
1808
77ae365e
SR
1809 /*
1810 * This is where the fun begins!
1811 *
1812 * We are fighting against races between a reader that
1813 * could be on another CPU trying to swap its reader
1814 * page with the buffer head.
1815 *
1816 * We are also fighting against interrupts coming in and
1817 * moving the head or tail on us as well.
1818 *
1819 * If the next page is the head page then we have filled
1820 * the buffer, unless the commit page is still on the
1821 * reader page.
1822 */
1823 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
aa20ae84 1824
77ae365e
SR
1825 /*
1826 * If the commit is not on the reader page, then
1827 * move the header page.
1828 */
1829 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1830 /*
1831 * If we are not in overwrite mode,
1832 * this is easy, just stop here.
1833 */
1834 if (!(buffer->flags & RB_FL_OVERWRITE))
1835 goto out_reset;
1836
1837 ret = rb_handle_head_page(cpu_buffer,
1838 tail_page,
1839 next_page);
1840 if (ret < 0)
1841 goto out_reset;
1842 if (ret)
1843 goto out_again;
1844 } else {
1845 /*
1846 * We need to be careful here too. The
1847 * commit page could still be on the reader
1848 * page. We could have a small buffer, and
1849 * have filled up the buffer with events
1850 * from interrupts and such, and wrapped.
1851 *
1852 * Note, if the tail page is also the on the
1853 * reader_page, we let it move out.
1854 */
1855 if (unlikely((cpu_buffer->commit_page !=
1856 cpu_buffer->tail_page) &&
1857 (cpu_buffer->commit_page ==
1858 cpu_buffer->reader_page))) {
1859 local_inc(&cpu_buffer->commit_overrun);
1860 goto out_reset;
1861 }
aa20ae84
SR
1862 }
1863 }
1864
77ae365e
SR
1865 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1866 if (ret) {
1867 /*
1868 * Nested commits always have zero deltas, so
1869 * just reread the time stamp
1870 */
6d3f1e12 1871 *ts = rb_time_stamp(buffer);
77ae365e 1872 next_page->page->time_stamp = *ts;
aa20ae84
SR
1873 }
1874
77ae365e 1875 out_again:
aa20ae84 1876
77ae365e 1877 rb_reset_tail(cpu_buffer, tail_page, tail, length);
aa20ae84
SR
1878
1879 /* fail and let the caller try again */
1880 return ERR_PTR(-EAGAIN);
1881
45141d46 1882 out_reset:
6f3b3440 1883 /* reset write */
c7b09308 1884 rb_reset_tail(cpu_buffer, tail_page, tail, length);
6f3b3440 1885
bf41a158 1886 return NULL;
7a8e76a3
SR
1887}
1888
6634ff26
SR
1889static struct ring_buffer_event *
1890__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1891 unsigned type, unsigned long length, u64 *ts)
1892{
1893 struct buffer_page *tail_page, *commit_page;
1894 struct ring_buffer_event *event;
1895 unsigned long tail, write;
1896
1897 commit_page = cpu_buffer->commit_page;
1898 /* we just need to protect against interrupts */
1899 barrier();
1900 tail_page = cpu_buffer->tail_page;
1901 write = local_add_return(length, &tail_page->write);
77ae365e
SR
1902
1903 /* set write to only the index of the write */
1904 write &= RB_WRITE_MASK;
6634ff26
SR
1905 tail = write - length;
1906
1907	/* See if we shot past the end of this buffer page */
1908 if (write > BUF_PAGE_SIZE)
1909 return rb_move_tail(cpu_buffer, length, tail,
1910 commit_page, tail_page, ts);
1911
1912 /* We reserved something on the buffer */
1913
6634ff26 1914 event = __rb_page_index(tail_page, tail);
1744a21d 1915 kmemcheck_annotate_bitfield(event, bitfield);
6634ff26
SR
1916 rb_update_event(event, type, length);
1917
1918 /* The passed in type is zero for DATA */
1919 if (likely(!type))
1920 local_inc(&tail_page->entries);
1921
1922 /*
fa743953
SR
1923 * If this is the first commit on the page, then update
1924 * its timestamp.
6634ff26 1925 */
fa743953
SR
1926 if (!tail)
1927 tail_page->page->time_stamp = *ts;
6634ff26
SR
1928
1929 return event;
1930}
1931
edd813bf
SR
1932static inline int
1933rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1934 struct ring_buffer_event *event)
1935{
1936 unsigned long new_index, old_index;
1937 struct buffer_page *bpage;
1938 unsigned long index;
1939 unsigned long addr;
1940
1941 new_index = rb_event_index(event);
1942 old_index = new_index + rb_event_length(event);
1943 addr = (unsigned long)event;
1944 addr &= PAGE_MASK;
1945
1946 bpage = cpu_buffer->tail_page;
1947
1948 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
77ae365e
SR
1949 unsigned long write_mask =
1950 local_read(&bpage->write) & ~RB_WRITE_MASK;
edd813bf
SR
1951 /*
1952 * This is on the tail page. It is possible that
1953 * a write could come in and move the tail page
1954 * and write to the next page. That is fine
1955 * because we just shorten what is on this page.
1956 */
77ae365e
SR
1957 old_index += write_mask;
1958 new_index += write_mask;
edd813bf
SR
1959 index = local_cmpxchg(&bpage->write, old_index, new_index);
1960 if (index == old_index)
1961 return 1;
1962 }
1963
1964 /* could not discard */
1965 return 0;
1966}
1967
7a8e76a3
SR
1968static int
1969rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1970 u64 *ts, u64 *delta)
1971{
1972 struct ring_buffer_event *event;
1973 static int once;
bf41a158 1974 int ret;
7a8e76a3
SR
1975
1976 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1977 printk(KERN_WARNING "Delta way too big! %llu"
1978 " ts=%llu write stamp = %llu\n",
e2862c94
SR
1979 (unsigned long long)*delta,
1980 (unsigned long long)*ts,
1981 (unsigned long long)cpu_buffer->write_stamp);
7a8e76a3
SR
1982 WARN_ON(1);
1983 }
1984
1985 /*
1986	 * The delta is too big, we need to add a
1987 * new timestamp.
1988 */
1989 event = __rb_reserve_next(cpu_buffer,
1990 RINGBUF_TYPE_TIME_EXTEND,
1991 RB_LEN_TIME_EXTEND,
1992 ts);
1993 if (!event)
bf41a158 1994 return -EBUSY;
7a8e76a3 1995
bf41a158
SR
1996 if (PTR_ERR(event) == -EAGAIN)
1997 return -EAGAIN;
1998
1999	/* Only a committed time event can update the write stamp */
fa743953 2000 if (rb_event_is_commit(cpu_buffer, event)) {
bf41a158 2001 /*
fa743953
SR
2002		 * If this is the first event on the page, then it was
2003 * updated with the page itself. Try to discard it
2004 * and if we can't just make it zero.
bf41a158
SR
2005 */
2006 if (rb_event_index(event)) {
2007 event->time_delta = *delta & TS_MASK;
2008 event->array[0] = *delta >> TS_SHIFT;
2009 } else {
ea05b57c
SR
2010 /* try to discard, since we do not need this */
2011 if (!rb_try_to_discard(cpu_buffer, event)) {
2012 /* nope, just zero it */
2013 event->time_delta = 0;
2014 event->array[0] = 0;
2015 }
bf41a158 2016 }
7a8e76a3 2017 cpu_buffer->write_stamp = *ts;
bf41a158
SR
2018 /* let the caller know this was the commit */
2019 ret = 1;
2020 } else {
edd813bf
SR
2021 /* Try to discard the event */
2022 if (!rb_try_to_discard(cpu_buffer, event)) {
2023 /* Darn, this is just wasted space */
2024 event->time_delta = 0;
2025 event->array[0] = 0;
edd813bf 2026 }
f57a8a19 2027 ret = 0;
7a8e76a3
SR
2028 }
2029
bf41a158
SR
2030 *delta = 0;
2031
2032 return ret;
7a8e76a3
SR
2033}
2034
fa743953
SR
2035static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2036{
2037 local_inc(&cpu_buffer->committing);
2038 local_inc(&cpu_buffer->commits);
2039}
2040
2041static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2042{
2043 unsigned long commits;
2044
2045 if (RB_WARN_ON(cpu_buffer,
2046 !local_read(&cpu_buffer->committing)))
2047 return;
2048
2049 again:
2050 commits = local_read(&cpu_buffer->commits);
2051 /* synchronize with interrupts */
2052 barrier();
2053 if (local_read(&cpu_buffer->committing) == 1)
2054 rb_set_commit_to_write(cpu_buffer);
2055
2056 local_dec(&cpu_buffer->committing);
2057
2058 /* synchronize with interrupts */
2059 barrier();
2060
2061 /*
2062 * Need to account for interrupts coming in between the
2063 * updating of the commit page and the clearing of the
2064 * committing counter.
2065 */
2066 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2067 !local_read(&cpu_buffer->committing)) {
2068 local_inc(&cpu_buffer->committing);
2069 goto again;
2070 }
2071}
2072
7a8e76a3 2073static struct ring_buffer_event *
62f0b3eb
SR
2074rb_reserve_next_event(struct ring_buffer *buffer,
2075 struct ring_buffer_per_cpu *cpu_buffer,
1cd8d735 2076 unsigned long length)
7a8e76a3
SR
2077{
2078 struct ring_buffer_event *event;
168b6b1d 2079 u64 ts, delta = 0;
bf41a158 2080 int commit = 0;
818e3dd3 2081 int nr_loops = 0;
7a8e76a3 2082
fa743953
SR
2083 rb_start_commit(cpu_buffer);
2084
85bac32c 2085#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
62f0b3eb
SR
2086 /*
2087 * Due to the ability to swap a cpu buffer from a buffer
2088 * it is possible it was swapped before we committed.
2089 * (committing stops a swap). We check for it here and
2090 * if it happened, we have to fail the write.
2091 */
2092 barrier();
2093 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2094 local_dec(&cpu_buffer->committing);
2095 local_dec(&cpu_buffer->commits);
2096 return NULL;
2097 }
85bac32c 2098#endif
62f0b3eb 2099
be957c44 2100 length = rb_calculate_event_length(length);
bf41a158 2101 again:
818e3dd3
SR
2102 /*
2103 * We allow for interrupts to reenter here and do a trace.
2104 * If one does, it will cause this original code to loop
2105 * back here. Even with heavy interrupts happening, this
2106 * should only happen a few times in a row. If this happens
2107 * 1000 times in a row, there must be either an interrupt
2108 * storm or we have something buggy.
2109 * Bail!
2110 */
3e89c7bb 2111 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
fa743953 2112 goto out_fail;
818e3dd3 2113
6d3f1e12 2114 ts = rb_time_stamp(cpu_buffer->buffer);
7a8e76a3 2115
bf41a158
SR
2116 /*
2117 * Only the first commit can update the timestamp.
2118 * Yes there is a race here. If an interrupt comes in
2119 * just after the conditional and it traces too, then it
2120 * will also check the deltas. More than one timestamp may
2121 * also be made. But only the entry that did the actual
2122 * commit will be something other than zero.
2123 */
0f0c85fc
SR
2124 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
2125 rb_page_write(cpu_buffer->tail_page) ==
2126 rb_commit_index(cpu_buffer))) {
168b6b1d 2127 u64 diff;
bf41a158 2128
168b6b1d 2129 diff = ts - cpu_buffer->write_stamp;
7a8e76a3 2130
168b6b1d 2131 /* make sure this diff is calculated here */
bf41a158
SR
2132 barrier();
2133
2134 /* Did the write stamp get updated already? */
2135 if (unlikely(ts < cpu_buffer->write_stamp))
168b6b1d 2136 goto get_event;
bf41a158 2137
168b6b1d
SR
2138 delta = diff;
2139 if (unlikely(test_time_stamp(delta))) {
7a8e76a3 2140
bf41a158 2141 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
bf41a158 2142 if (commit == -EBUSY)
fa743953 2143 goto out_fail;
bf41a158
SR
2144
2145 if (commit == -EAGAIN)
2146 goto again;
2147
2148 RB_WARN_ON(cpu_buffer, commit < 0);
7a8e76a3 2149 }
168b6b1d 2150 }
7a8e76a3 2151
168b6b1d 2152 get_event:
1cd8d735 2153 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
168b6b1d 2154 if (unlikely(PTR_ERR(event) == -EAGAIN))
bf41a158
SR
2155 goto again;
2156
fa743953
SR
2157 if (!event)
2158 goto out_fail;
7a8e76a3 2159
fa743953 2160 if (!rb_event_is_commit(cpu_buffer, event))
7a8e76a3
SR
2161 delta = 0;
2162
2163 event->time_delta = delta;
2164
2165 return event;
fa743953
SR
2166
2167 out_fail:
2168 rb_end_commit(cpu_buffer);
2169 return NULL;
7a8e76a3
SR
2170}
2171
1155de47
PM
2172#ifdef CONFIG_TRACING
2173
aa18efb2 2174#define TRACE_RECURSIVE_DEPTH 16
261842b7
SR
2175
2176static int trace_recursive_lock(void)
2177{
aa18efb2 2178 current->trace_recursion++;
261842b7 2179
aa18efb2
SR
2180 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
2181 return 0;
e057a5e5 2182
aa18efb2
SR
2183 /* Disable all tracing before we do anything else */
2184 tracing_off_permanent();
261842b7 2185
7d7d2b80 2186 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
aa18efb2
SR
2187 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2188 current->trace_recursion,
2189 hardirq_count() >> HARDIRQ_SHIFT,
2190 softirq_count() >> SOFTIRQ_SHIFT,
2191 in_nmi());
261842b7 2192
aa18efb2
SR
2193 WARN_ON_ONCE(1);
2194 return -1;
261842b7
SR
2195}
2196
2197static void trace_recursive_unlock(void)
2198{
aa18efb2 2199 WARN_ON_ONCE(!current->trace_recursion);
261842b7 2200
aa18efb2 2201 current->trace_recursion--;
261842b7
SR
2202}
2203
1155de47
PM
2204#else
2205
2206#define trace_recursive_lock() (0)
2207#define trace_recursive_unlock() do { } while (0)
2208
2209#endif
2210
bf41a158
SR
2211static DEFINE_PER_CPU(int, rb_need_resched);
2212
7a8e76a3
SR
2213/**
2214 * ring_buffer_lock_reserve - reserve a part of the buffer
2215 * @buffer: the ring buffer to reserve from
2216 * @length: the length of the data to reserve (excluding event header)
7a8e76a3
SR
2217 *
2218 * Returns a reserved event on the ring buffer to copy directly to.
2219 * The user of this interface will need to get the body to write into
2220 * and can use the ring_buffer_event_data() interface.
2221 *
2222 * The length is the length of the data needed, not the event length
2223 * which also includes the event header.
2224 *
2225 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2226 * If NULL is returned, then nothing has been allocated or locked.
2227 */
2228struct ring_buffer_event *
0a987751 2229ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
7a8e76a3
SR
2230{
2231 struct ring_buffer_per_cpu *cpu_buffer;
2232 struct ring_buffer_event *event;
bf41a158 2233 int cpu, resched;
7a8e76a3 2234
033601a3 2235 if (ring_buffer_flags != RB_BUFFERS_ON)
a3583244
SR
2236 return NULL;
2237
7a8e76a3
SR
2238 if (atomic_read(&buffer->record_disabled))
2239 return NULL;
2240
bf41a158 2241 /* If we are tracing schedule, we don't want to recurse */
182e9f5f 2242 resched = ftrace_preempt_disable();
bf41a158 2243
261842b7
SR
2244 if (trace_recursive_lock())
2245 goto out_nocheck;
2246
7a8e76a3
SR
2247 cpu = raw_smp_processor_id();
2248
9e01c1b7 2249 if (!cpumask_test_cpu(cpu, buffer->cpumask))
d769041f 2250 goto out;
7a8e76a3
SR
2251
2252 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
2253
2254 if (atomic_read(&cpu_buffer->record_disabled))
d769041f 2255 goto out;
7a8e76a3 2256
be957c44 2257 if (length > BUF_MAX_DATA_SIZE)
bf41a158 2258 goto out;
7a8e76a3 2259
62f0b3eb 2260 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3 2261 if (!event)
d769041f 2262 goto out;
7a8e76a3 2263
bf41a158
SR
2264 /*
2265 * Need to store resched state on this cpu.
2266 * Only the first needs to.
2267 */
2268
2269 if (preempt_count() == 1)
2270 per_cpu(rb_need_resched, cpu) = resched;
2271
7a8e76a3
SR
2272 return event;
2273
d769041f 2274 out:
261842b7
SR
2275 trace_recursive_unlock();
2276
2277 out_nocheck:
182e9f5f 2278 ftrace_preempt_enable(resched);
7a8e76a3
SR
2279 return NULL;
2280}
c4f50183 2281EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
7a8e76a3 2282
a1863c21
SR
2283static void
2284rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
7a8e76a3
SR
2285 struct ring_buffer_event *event)
2286{
fa743953
SR
2287 /*
2288	 * The first event in the commit queue updates the
2289 * time stamp.
2290 */
2291 if (rb_event_is_commit(cpu_buffer, event))
2292 cpu_buffer->write_stamp += event->time_delta;
a1863c21 2293}
bf41a158 2294
a1863c21
SR
2295static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2296 struct ring_buffer_event *event)
2297{
2298 local_inc(&cpu_buffer->entries);
2299 rb_update_write_stamp(cpu_buffer, event);
fa743953 2300 rb_end_commit(cpu_buffer);
7a8e76a3
SR
2301}
2302
2303/**
2304 * ring_buffer_unlock_commit - commit a reserved event
2305 * @buffer: The buffer to commit to
2306 * @event: The event pointer to commit.
7a8e76a3
SR
2307 *
2308 * This commits the data to the ring buffer, and releases any locks held.
2309 *
2310 * Must be paired with ring_buffer_lock_reserve.
2311 */
2312int ring_buffer_unlock_commit(struct ring_buffer *buffer,
0a987751 2313 struct ring_buffer_event *event)
7a8e76a3
SR
2314{
2315 struct ring_buffer_per_cpu *cpu_buffer;
2316 int cpu = raw_smp_processor_id();
2317
2318 cpu_buffer = buffer->buffers[cpu];
2319
7a8e76a3
SR
2320 rb_commit(cpu_buffer, event);
2321
261842b7
SR
2322 trace_recursive_unlock();
2323
bf41a158
SR
2324 /*
2325 * Only the last preempt count needs to restore preemption.
2326 */
182e9f5f
SR
2327 if (preempt_count() == 1)
2328 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2329 else
bf41a158 2330 preempt_enable_no_resched_notrace();
7a8e76a3
SR
2331
2332 return 0;
2333}
c4f50183 2334EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
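/*
 * Illustrative sketch, not part of this file: the typical reserve/commit
 * pairing described in the kernel-doc above.  "struct my_event" and its
 * field are made-up names for the example payload.
 *
 *	struct ring_buffer_event *event;
 *	struct my_event *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */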
7a8e76a3 2335
f3b9aae1
FW
2336static inline void rb_event_discard(struct ring_buffer_event *event)
2337{
334d4169
LJ
2338 /* array[0] holds the actual length for the discarded event */
2339 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2340 event->type_len = RINGBUF_TYPE_PADDING;
f3b9aae1
FW
2341 /* time delta must be non zero */
2342 if (!event->time_delta)
2343 event->time_delta = 1;
2344}
2345
a1863c21
SR
2346/*
2347 * Decrement the entries to the page that an event is on.
2348 * The event does not even need to exist, only the pointer
2349 * to the page it is on. This may only be called before the commit
2350 * takes place.
2351 */
2352static inline void
2353rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2354 struct ring_buffer_event *event)
2355{
2356 unsigned long addr = (unsigned long)event;
2357 struct buffer_page *bpage = cpu_buffer->commit_page;
2358 struct buffer_page *start;
2359
2360 addr &= PAGE_MASK;
2361
2362 /* Do the likely case first */
2363 if (likely(bpage->page == (void *)addr)) {
2364 local_dec(&bpage->entries);
2365 return;
2366 }
2367
2368 /*
2369 * Because the commit page may be on the reader page we
2370 * start with the next page and check the end loop there.
2371 */
2372 rb_inc_page(cpu_buffer, &bpage);
2373 start = bpage;
2374 do {
2375 if (bpage->page == (void *)addr) {
2376 local_dec(&bpage->entries);
2377 return;
2378 }
2379 rb_inc_page(cpu_buffer, &bpage);
2380 } while (bpage != start);
2381
2382 /* commit not part of this buffer?? */
2383 RB_WARN_ON(cpu_buffer, 1);
2384}
2385
fa1b47dd
SR
2386/**
2387 * ring_buffer_commit_discard - discard an event that has not been committed
2388 * @buffer: the ring buffer
2389 * @event: non committed event to discard
2390 *
dc892f73
SR
2391 * Sometimes an event that is in the ring buffer needs to be ignored.
2392 * This function lets the user discard an event in the ring buffer
2393 * and then that event will not be read later.
2394 *
2395 * This function only works if it is called before the item has been
2396 * committed. It will try to free the event from the ring buffer
fa1b47dd
SR
2397 * if another event has not been added behind it.
2398 *
2399 * If another event has been added behind it, it will set the event
2400 * up as discarded, and perform the commit.
2401 *
2402 * If this function is called, do not call ring_buffer_unlock_commit on
2403 * the event.
2404 */
2405void ring_buffer_discard_commit(struct ring_buffer *buffer,
2406 struct ring_buffer_event *event)
2407{
2408 struct ring_buffer_per_cpu *cpu_buffer;
fa1b47dd
SR
2409 int cpu;
2410
2411 /* The event is discarded regardless */
f3b9aae1 2412 rb_event_discard(event);
fa1b47dd 2413
fa743953
SR
2414 cpu = smp_processor_id();
2415 cpu_buffer = buffer->buffers[cpu];
2416
fa1b47dd
SR
2417 /*
2418 * This must only be called if the event has not been
2419 * committed yet. Thus we can assume that preemption
2420 * is still disabled.
2421 */
fa743953 2422 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
fa1b47dd 2423
a1863c21 2424 rb_decrement_entry(cpu_buffer, event);
0f2541d2 2425 if (rb_try_to_discard(cpu_buffer, event))
edd813bf 2426 goto out;
fa1b47dd
SR
2427
2428 /*
2429 * The commit is still visible by the reader, so we
a1863c21 2430 * must still update the timestamp.
fa1b47dd 2431 */
a1863c21 2432 rb_update_write_stamp(cpu_buffer, event);
fa1b47dd 2433 out:
fa743953 2434 rb_end_commit(cpu_buffer);
fa1b47dd 2435
f3b9aae1
FW
2436 trace_recursive_unlock();
2437
fa1b47dd
SR
2438 /*
2439 * Only the last preempt count needs to restore preemption.
2440 */
2441 if (preempt_count() == 1)
2442 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2443 else
2444 preempt_enable_no_resched_notrace();
2445
2446}
2447EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
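/*
 * Illustrative sketch, not part of this file: discarding a reserved event
 * instead of committing it, for example when a filter rejects the data
 * after it was written.  "fill_entry" and "my_filter_match" are made-up
 * helpers used only for the example.
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	fill_entry(entry);
 *	if (my_filter_match(entry))
 *		ring_buffer_unlock_commit(buffer, event);
 *	else
 *		ring_buffer_discard_commit(buffer, event);
 */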
2448
7a8e76a3
SR
2449/**
2450 * ring_buffer_write - write data to the buffer without reserving
2451 * @buffer: The ring buffer to write to.
2452 * @length: The length of the data being written (excluding the event header)
2453 * @data: The data to write to the buffer.
2454 *
2455 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2456 * one function. If you already have the data to write to the buffer, it
2457 * may be easier to simply call this function.
2458 *
2459 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2460 * and not the length of the event which would hold the header.
2461 */
2462int ring_buffer_write(struct ring_buffer *buffer,
2463 unsigned long length,
2464 void *data)
2465{
2466 struct ring_buffer_per_cpu *cpu_buffer;
2467 struct ring_buffer_event *event;
7a8e76a3
SR
2468 void *body;
2469 int ret = -EBUSY;
bf41a158 2470 int cpu, resched;
7a8e76a3 2471
033601a3 2472 if (ring_buffer_flags != RB_BUFFERS_ON)
a3583244
SR
2473 return -EBUSY;
2474
7a8e76a3
SR
2475 if (atomic_read(&buffer->record_disabled))
2476 return -EBUSY;
2477
182e9f5f 2478 resched = ftrace_preempt_disable();
bf41a158 2479
7a8e76a3
SR
2480 cpu = raw_smp_processor_id();
2481
9e01c1b7 2482 if (!cpumask_test_cpu(cpu, buffer->cpumask))
d769041f 2483 goto out;
7a8e76a3
SR
2484
2485 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
2486
2487 if (atomic_read(&cpu_buffer->record_disabled))
2488 goto out;
2489
be957c44
SR
2490 if (length > BUF_MAX_DATA_SIZE)
2491 goto out;
2492
62f0b3eb 2493 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3
SR
2494 if (!event)
2495 goto out;
2496
2497 body = rb_event_data(event);
2498
2499 memcpy(body, data, length);
2500
2501 rb_commit(cpu_buffer, event);
2502
2503 ret = 0;
2504 out:
182e9f5f 2505 ftrace_preempt_enable(resched);
7a8e76a3
SR
2506
2507 return ret;
2508}
c4f50183 2509EXPORT_SYMBOL_GPL(ring_buffer_write);
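/*
 * Illustrative sketch, not part of this file: when the data already exists
 * in a local variable, ring_buffer_write() replaces the reserve/copy/commit
 * sequence shown earlier.  "struct my_event" is a made-up payload type.
 *
 *	struct my_event entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		return -EBUSY;
 */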
7a8e76a3 2510
34a148bf 2511static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
bf41a158
SR
2512{
2513 struct buffer_page *reader = cpu_buffer->reader_page;
77ae365e 2514 struct buffer_page *head = rb_set_head_page(cpu_buffer);
bf41a158
SR
2515 struct buffer_page *commit = cpu_buffer->commit_page;
2516
77ae365e
SR
2517 /* In case of error, head will be NULL */
2518 if (unlikely(!head))
2519 return 1;
2520
bf41a158
SR
2521 return reader->read == rb_page_commit(reader) &&
2522 (commit == reader ||
2523 (commit == head &&
2524 head->read == rb_page_commit(commit)));
2525}
2526
7a8e76a3
SR
2527/**
2528 * ring_buffer_record_disable - stop all writes into the buffer
2529 * @buffer: The ring buffer to stop writes to.
2530 *
2531 * This prevents all writes to the buffer. Any attempt to write
2532 * to the buffer after this will fail and return NULL.
2533 *
2534 * The caller should call synchronize_sched() after this.
2535 */
2536void ring_buffer_record_disable(struct ring_buffer *buffer)
2537{
2538 atomic_inc(&buffer->record_disabled);
2539}
c4f50183 2540EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
7a8e76a3
SR
2541
2542/**
2543 * ring_buffer_record_enable - enable writes to the buffer
2544 * @buffer: The ring buffer to enable writes
2545 *
2546 * Note, multiple disables will need the same number of enables
2547 * to truly enable the writing (much like preempt_disable).
2548 */
2549void ring_buffer_record_enable(struct ring_buffer *buffer)
2550{
2551 atomic_dec(&buffer->record_disabled);
2552}
c4f50183 2553EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
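/*
 * Illustrative sketch, not part of this file: the disable/enable pair
 * around a window where no writes may happen, using the
 * synchronize_sched() call recommended above.
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();		// wait for writers already in progress
 *	// ... read or reset the buffer here ...
 *	ring_buffer_record_enable(buffer);
 */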
7a8e76a3
SR
2554
2555/**
2556 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2557 * @buffer: The ring buffer to stop writes to.
2558 * @cpu: The CPU buffer to stop
2559 *
2560 * This prevents all writes to the buffer. Any attempt to write
2561 * to the buffer after this will fail and return NULL.
2562 *
2563 * The caller should call synchronize_sched() after this.
2564 */
2565void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2566{
2567 struct ring_buffer_per_cpu *cpu_buffer;
2568
9e01c1b7 2569 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2570 return;
7a8e76a3
SR
2571
2572 cpu_buffer = buffer->buffers[cpu];
2573 atomic_inc(&cpu_buffer->record_disabled);
2574}
c4f50183 2575EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
7a8e76a3
SR
2576
2577/**
2578 * ring_buffer_record_enable_cpu - enable writes to the buffer
2579 * @buffer: The ring buffer to enable writes
2580 * @cpu: The CPU to enable.
2581 *
2582 * Note, multiple disables will need the same number of enables
2583 * to truly enable the writing (much like preempt_disable).
2584 */
2585void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2586{
2587 struct ring_buffer_per_cpu *cpu_buffer;
2588
9e01c1b7 2589 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2590 return;
7a8e76a3
SR
2591
2592 cpu_buffer = buffer->buffers[cpu];
2593 atomic_dec(&cpu_buffer->record_disabled);
2594}
c4f50183 2595EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
7a8e76a3
SR
2596
2597/**
2598 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2599 * @buffer: The ring buffer
2600 * @cpu: The per CPU buffer to get the entries from.
2601 */
2602unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2603{
2604 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 2605 unsigned long ret;
7a8e76a3 2606
9e01c1b7 2607 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2608 return 0;
7a8e76a3
SR
2609
2610 cpu_buffer = buffer->buffers[cpu];
77ae365e 2611 ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
e4906eff 2612 - cpu_buffer->read;
554f786e
SR
2613
2614 return ret;
7a8e76a3 2615}
c4f50183 2616EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
7a8e76a3
SR
2617
2618/**
2619 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2620 * @buffer: The ring buffer
2621 * @cpu: The per CPU buffer to get the number of overruns from
2622 */
2623unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2624{
2625 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 2626 unsigned long ret;
7a8e76a3 2627
9e01c1b7 2628 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2629 return 0;
7a8e76a3
SR
2630
2631 cpu_buffer = buffer->buffers[cpu];
77ae365e 2632 ret = local_read(&cpu_buffer->overrun);
554f786e
SR
2633
2634 return ret;
7a8e76a3 2635}
c4f50183 2636EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
7a8e76a3 2637
f0d2c681
SR
2638/**
2639 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2640 * @buffer: The ring buffer
2641 * @cpu: The per CPU buffer to get the number of overruns from
2642 */
2643unsigned long
2644ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2645{
2646 struct ring_buffer_per_cpu *cpu_buffer;
2647 unsigned long ret;
2648
2649 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2650 return 0;
2651
2652 cpu_buffer = buffer->buffers[cpu];
77ae365e 2653 ret = local_read(&cpu_buffer->commit_overrun);
f0d2c681
SR
2654
2655 return ret;
2656}
2657EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2658
7a8e76a3
SR
2659/**
2660 * ring_buffer_entries - get the number of entries in a buffer
2661 * @buffer: The ring buffer
2662 *
2663 * Returns the total number of entries in the ring buffer
2664 * (all CPU entries)
2665 */
2666unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2667{
2668 struct ring_buffer_per_cpu *cpu_buffer;
2669 unsigned long entries = 0;
2670 int cpu;
2671
2672 /* if you care about this being correct, lock the buffer */
2673 for_each_buffer_cpu(buffer, cpu) {
2674 cpu_buffer = buffer->buffers[cpu];
e4906eff 2675 entries += (local_read(&cpu_buffer->entries) -
77ae365e 2676 local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
7a8e76a3
SR
2677 }
2678
2679 return entries;
2680}
c4f50183 2681EXPORT_SYMBOL_GPL(ring_buffer_entries);
7a8e76a3
SR
2682
2683/**
67b394f7 2684 * ring_buffer_overruns - get the number of overruns in buffer
7a8e76a3
SR
2685 * @buffer: The ring buffer
2686 *
2687 * Returns the total number of overruns in the ring buffer
2688 * (all CPU entries)
2689 */
2690unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2691{
2692 struct ring_buffer_per_cpu *cpu_buffer;
2693 unsigned long overruns = 0;
2694 int cpu;
2695
2696 /* if you care about this being correct, lock the buffer */
2697 for_each_buffer_cpu(buffer, cpu) {
2698 cpu_buffer = buffer->buffers[cpu];
77ae365e 2699 overruns += local_read(&cpu_buffer->overrun);
7a8e76a3
SR
2700 }
2701
2702 return overruns;
2703}
c4f50183 2704EXPORT_SYMBOL_GPL(ring_buffer_overruns);
7a8e76a3 2705
642edba5 2706static void rb_iter_reset(struct ring_buffer_iter *iter)
7a8e76a3
SR
2707{
2708 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2709
d769041f
SR
2710 /* Iterator usage is expected to have record disabled */
2711 if (list_empty(&cpu_buffer->reader_page->list)) {
77ae365e
SR
2712 iter->head_page = rb_set_head_page(cpu_buffer);
2713 if (unlikely(!iter->head_page))
2714 return;
2715 iter->head = iter->head_page->read;
d769041f
SR
2716 } else {
2717 iter->head_page = cpu_buffer->reader_page;
6f807acd 2718 iter->head = cpu_buffer->reader_page->read;
d769041f
SR
2719 }
2720 if (iter->head)
2721 iter->read_stamp = cpu_buffer->read_stamp;
2722 else
abc9b56d 2723 iter->read_stamp = iter->head_page->page->time_stamp;
642edba5 2724}
f83c9d0f 2725
642edba5
SR
2726/**
2727 * ring_buffer_iter_reset - reset an iterator
2728 * @iter: The iterator to reset
2729 *
2730 * Resets the iterator, so that it will start from the beginning
2731 * again.
2732 */
2733void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2734{
554f786e 2735 struct ring_buffer_per_cpu *cpu_buffer;
642edba5
SR
2736 unsigned long flags;
2737
554f786e
SR
2738 if (!iter)
2739 return;
2740
2741 cpu_buffer = iter->cpu_buffer;
2742
642edba5
SR
2743 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2744 rb_iter_reset(iter);
f83c9d0f 2745 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 2746}
c4f50183 2747EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
7a8e76a3
SR
2748
2749/**
2750 * ring_buffer_iter_empty - check if an iterator has no more to read
2751 * @iter: The iterator to check
2752 */
2753int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2754{
2755 struct ring_buffer_per_cpu *cpu_buffer;
2756
2757 cpu_buffer = iter->cpu_buffer;
2758
bf41a158
SR
2759 return iter->head_page == cpu_buffer->commit_page &&
2760 iter->head == rb_commit_index(cpu_buffer);
7a8e76a3 2761}
c4f50183 2762EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
7a8e76a3
SR
2763
2764static void
2765rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2766 struct ring_buffer_event *event)
2767{
2768 u64 delta;
2769
334d4169 2770 switch (event->type_len) {
7a8e76a3
SR
2771 case RINGBUF_TYPE_PADDING:
2772 return;
2773
2774 case RINGBUF_TYPE_TIME_EXTEND:
2775 delta = event->array[0];
2776 delta <<= TS_SHIFT;
2777 delta += event->time_delta;
2778 cpu_buffer->read_stamp += delta;
2779 return;
2780
2781 case RINGBUF_TYPE_TIME_STAMP:
2782 /* FIXME: not implemented */
2783 return;
2784
2785 case RINGBUF_TYPE_DATA:
2786 cpu_buffer->read_stamp += event->time_delta;
2787 return;
2788
2789 default:
2790 BUG();
2791 }
2792 return;
2793}
2794
2795static void
2796rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2797 struct ring_buffer_event *event)
2798{
2799 u64 delta;
2800
334d4169 2801 switch (event->type_len) {
7a8e76a3
SR
2802 case RINGBUF_TYPE_PADDING:
2803 return;
2804
2805 case RINGBUF_TYPE_TIME_EXTEND:
2806 delta = event->array[0];
2807 delta <<= TS_SHIFT;
2808 delta += event->time_delta;
2809 iter->read_stamp += delta;
2810 return;
2811
2812 case RINGBUF_TYPE_TIME_STAMP:
2813 /* FIXME: not implemented */
2814 return;
2815
2816 case RINGBUF_TYPE_DATA:
2817 iter->read_stamp += event->time_delta;
2818 return;
2819
2820 default:
2821 BUG();
2822 }
2823 return;
2824}
2825
d769041f
SR
2826static struct buffer_page *
2827rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 2828{
d769041f
SR
2829 struct buffer_page *reader = NULL;
2830 unsigned long flags;
818e3dd3 2831 int nr_loops = 0;
77ae365e 2832 int ret;
d769041f 2833
3e03fb7f
SR
2834 local_irq_save(flags);
2835 __raw_spin_lock(&cpu_buffer->lock);
d769041f
SR
2836
2837 again:
818e3dd3
SR
2838 /*
2839 * This should normally only loop twice. But because the
2840 * start of the reader inserts an empty page, it causes
2841 * a case where we will loop three times. There should be no
2842 * reason to loop four times (that I know of).
2843 */
3e89c7bb 2844 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
818e3dd3
SR
2845 reader = NULL;
2846 goto out;
2847 }
2848
d769041f
SR
2849 reader = cpu_buffer->reader_page;
2850
2851 /* If there's more to read, return this page */
bf41a158 2852 if (cpu_buffer->reader_page->read < rb_page_size(reader))
d769041f
SR
2853 goto out;
2854
2855 /* Never should we have an index greater than the size */
3e89c7bb
SR
2856 if (RB_WARN_ON(cpu_buffer,
2857 cpu_buffer->reader_page->read > rb_page_size(reader)))
2858 goto out;
d769041f
SR
2859
2860 /* check if we caught up to the tail */
2861 reader = NULL;
bf41a158 2862 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
d769041f 2863 goto out;
7a8e76a3
SR
2864
2865 /*
d769041f 2866 * Reset the reader page to size zero.
7a8e76a3 2867 */
77ae365e
SR
2868 local_set(&cpu_buffer->reader_page->write, 0);
2869 local_set(&cpu_buffer->reader_page->entries, 0);
2870 local_set(&cpu_buffer->reader_page->page->commit, 0);
7a8e76a3 2871
77ae365e
SR
2872 spin:
2873 /*
2874 * Splice the empty reader page into the list around the head.
2875 */
2876 reader = rb_set_head_page(cpu_buffer);
d769041f
SR
2877 cpu_buffer->reader_page->list.next = reader->list.next;
2878 cpu_buffer->reader_page->list.prev = reader->list.prev;
bf41a158 2879
3adc54fa
SR
2880 /*
2881 * cpu_buffer->pages just needs to point to the buffer, it
2882	 * has no specific buffer page to point to. Let's move it out
2883	 * of our way so we don't accidentally swap it.
2884 */
2885 cpu_buffer->pages = reader->list.prev;
2886
77ae365e
SR
2887 /* The reader page will be pointing to the new head */
2888 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
7a8e76a3 2889
77ae365e
SR
2890 /*
2891 * Here's the tricky part.
2892 *
2893 * We need to move the pointer past the header page.
2894 * But we can only do that if a writer is not currently
2895 * moving it. The page before the header page has the
2896 * flag bit '1' set if it is pointing to the page we want.
2897 * but if the writer is in the process of moving it
2899	 * then it will be '2' or already moved '0'.
2899 */
2900
2901 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
7a8e76a3
SR
2902
2903 /*
77ae365e 2904 * If we did not convert it, then we must try again.
7a8e76a3 2905 */
77ae365e
SR
2906 if (!ret)
2907 goto spin;
7a8e76a3 2908
77ae365e
SR
2909 /*
2910 * Yeah! We succeeded in replacing the page.
2911 *
2912 * Now make the new head point back to the reader page.
2913 */
2914 reader->list.next->prev = &cpu_buffer->reader_page->list;
2915 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
d769041f
SR
2916
2917 /* Finally update the reader page to the new head */
2918 cpu_buffer->reader_page = reader;
2919 rb_reset_reader_page(cpu_buffer);
2920
2921 goto again;
2922
2923 out:
3e03fb7f
SR
2924 __raw_spin_unlock(&cpu_buffer->lock);
2925 local_irq_restore(flags);
d769041f
SR
2926
2927 return reader;
2928}
2929
2930static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2931{
2932 struct ring_buffer_event *event;
2933 struct buffer_page *reader;
2934 unsigned length;
2935
2936 reader = rb_get_reader_page(cpu_buffer);
7a8e76a3 2937
d769041f 2938 /* This function should not be called when buffer is empty */
3e89c7bb
SR
2939 if (RB_WARN_ON(cpu_buffer, !reader))
2940 return;
7a8e76a3 2941
d769041f
SR
2942 event = rb_reader_event(cpu_buffer);
2943
a1863c21 2944 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
e4906eff 2945 cpu_buffer->read++;
d769041f
SR
2946
2947 rb_update_read_stamp(cpu_buffer, event);
2948
2949 length = rb_event_length(event);
6f807acd 2950 cpu_buffer->reader_page->read += length;
7a8e76a3
SR
2951}
2952
2953static void rb_advance_iter(struct ring_buffer_iter *iter)
2954{
2955 struct ring_buffer *buffer;
2956 struct ring_buffer_per_cpu *cpu_buffer;
2957 struct ring_buffer_event *event;
2958 unsigned length;
2959
2960 cpu_buffer = iter->cpu_buffer;
2961 buffer = cpu_buffer->buffer;
2962
2963 /*
2964 * Check if we are at the end of the buffer.
2965 */
bf41a158 2966 if (iter->head >= rb_page_size(iter->head_page)) {
ea05b57c
SR
2967 /* discarded commits can make the page empty */
2968 if (iter->head_page == cpu_buffer->commit_page)
3e89c7bb 2969 return;
d769041f 2970 rb_inc_iter(iter);
7a8e76a3
SR
2971 return;
2972 }
2973
2974 event = rb_iter_head_event(iter);
2975
2976 length = rb_event_length(event);
2977
2978 /*
2979 * This should not be called to advance the header if we are
2980 * at the tail of the buffer.
2981 */
3e89c7bb 2982 if (RB_WARN_ON(cpu_buffer,
f536aafc 2983 (iter->head_page == cpu_buffer->commit_page) &&
3e89c7bb
SR
2984 (iter->head + length > rb_commit_index(cpu_buffer))))
2985 return;
7a8e76a3
SR
2986
2987 rb_update_iter_read_stamp(iter, event);
2988
2989 iter->head += length;
2990
2991 /* check for end of page padding */
bf41a158
SR
2992 if ((iter->head >= rb_page_size(iter->head_page)) &&
2993 (iter->head_page != cpu_buffer->commit_page))
7a8e76a3
SR
2994 rb_advance_iter(iter);
2995}
2996
f83c9d0f 2997static struct ring_buffer_event *
d8eeb2d3 2998rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
7a8e76a3 2999{
7a8e76a3 3000 struct ring_buffer_event *event;
d769041f 3001 struct buffer_page *reader;
818e3dd3 3002 int nr_loops = 0;
7a8e76a3 3003
7a8e76a3 3004 again:
818e3dd3
SR
3005 /*
3006 * We repeat when a timestamp is encountered. It is possible
3007 * to get multiple timestamps from an interrupt entering just
ea05b57c
SR
3008 * as one timestamp is about to be written, or from discarded
3009 * commits. The most that we can have is the number on a single page.
818e3dd3 3010 */
ea05b57c 3011 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
818e3dd3 3012 return NULL;
818e3dd3 3013
d769041f
SR
3014 reader = rb_get_reader_page(cpu_buffer);
3015 if (!reader)
7a8e76a3
SR
3016 return NULL;
3017
d769041f 3018 event = rb_reader_event(cpu_buffer);
7a8e76a3 3019
334d4169 3020 switch (event->type_len) {
7a8e76a3 3021 case RINGBUF_TYPE_PADDING:
2d622719
TZ
3022 if (rb_null_event(event))
3023 RB_WARN_ON(cpu_buffer, 1);
3024 /*
3025 * Because the writer could be discarding every
3026 * event it creates (which would probably be bad)
3027 * if we were to go back to "again" then we may never
3028 * catch up, and will trigger the warn on, or lock
3029 * the box. Return the padding, and we will release
3030 * the current locks, and try again.
3031 */
2d622719 3032 return event;
7a8e76a3
SR
3033
3034 case RINGBUF_TYPE_TIME_EXTEND:
3035 /* Internal data, OK to advance */
d769041f 3036 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
3037 goto again;
3038
3039 case RINGBUF_TYPE_TIME_STAMP:
3040 /* FIXME: not implemented */
d769041f 3041 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
3042 goto again;
3043
3044 case RINGBUF_TYPE_DATA:
3045 if (ts) {
3046 *ts = cpu_buffer->read_stamp + event->time_delta;
d8eeb2d3 3047 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
37886f6a 3048 cpu_buffer->cpu, ts);
7a8e76a3
SR
3049 }
3050 return event;
3051
3052 default:
3053 BUG();
3054 }
3055
3056 return NULL;
3057}
c4f50183 3058EXPORT_SYMBOL_GPL(ring_buffer_peek);
7a8e76a3 3059
f83c9d0f
SR
3060static struct ring_buffer_event *
3061rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
7a8e76a3
SR
3062{
3063 struct ring_buffer *buffer;
3064 struct ring_buffer_per_cpu *cpu_buffer;
3065 struct ring_buffer_event *event;
818e3dd3 3066 int nr_loops = 0;
7a8e76a3
SR
3067
3068 if (ring_buffer_iter_empty(iter))
3069 return NULL;
3070
3071 cpu_buffer = iter->cpu_buffer;
3072 buffer = cpu_buffer->buffer;
3073
3074 again:
818e3dd3 3075 /*
ea05b57c
SR
3076 * We repeat when a timestamp is encountered.
3077 * We can get multiple timestamps by nested interrupts or also
3078 * if filtering is on (discarding commits). Since discarding
3079 * commits can be frequent we can get a lot of timestamps.
3080 * But we limit them by not adding timestamps if they begin
3081 * at the start of a page.
818e3dd3 3082 */
ea05b57c 3083 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
818e3dd3 3084 return NULL;
818e3dd3 3085
7a8e76a3
SR
3086 if (rb_per_cpu_empty(cpu_buffer))
3087 return NULL;
3088
3089 event = rb_iter_head_event(iter);
3090
334d4169 3091 switch (event->type_len) {
7a8e76a3 3092 case RINGBUF_TYPE_PADDING:
2d622719
TZ
3093 if (rb_null_event(event)) {
3094 rb_inc_iter(iter);
3095 goto again;
3096 }
3097 rb_advance_iter(iter);
3098 return event;
7a8e76a3
SR
3099
3100 case RINGBUF_TYPE_TIME_EXTEND:
3101 /* Internal data, OK to advance */
3102 rb_advance_iter(iter);
3103 goto again;
3104
3105 case RINGBUF_TYPE_TIME_STAMP:
3106 /* FIXME: not implemented */
3107 rb_advance_iter(iter);
3108 goto again;
3109
3110 case RINGBUF_TYPE_DATA:
3111 if (ts) {
3112 *ts = iter->read_stamp + event->time_delta;
37886f6a
SR
3113 ring_buffer_normalize_time_stamp(buffer,
3114 cpu_buffer->cpu, ts);
7a8e76a3
SR
3115 }
3116 return event;
3117
3118 default:
3119 BUG();
3120 }
3121
3122 return NULL;
3123}
c4f50183 3124EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
7a8e76a3 3125
8d707e8e
SR
3126static inline int rb_ok_to_lock(void)
3127{
3128 /*
3129 * If an NMI die dumps out the content of the ring buffer
3130 * do not grab locks. We also permanently disable the ring
3131	 * buffer. A one-time deal is all you get from reading
3132 * the ring buffer from an NMI.
3133 */
464e85eb 3134 if (likely(!in_nmi()))
8d707e8e
SR
3135 return 1;
3136
3137 tracing_off_permanent();
3138 return 0;
3139}
3140
f83c9d0f
SR
3141/**
3142 * ring_buffer_peek - peek at the next event to be read
3143 * @buffer: The ring buffer to read
3144 * @cpu: The cpu to peek at
3145 * @ts: The timestamp counter of this event.
3146 *
3147 * This will return the event that will be read next, but does
3148 * not consume the data.
3149 */
3150struct ring_buffer_event *
3151ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
3152{
3153 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
8aabee57 3154 struct ring_buffer_event *event;
f83c9d0f 3155 unsigned long flags;
8d707e8e 3156 int dolock;
f83c9d0f 3157
554f786e 3158 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3159 return NULL;
554f786e 3160
8d707e8e 3161 dolock = rb_ok_to_lock();
2d622719 3162 again:
8d707e8e
SR
3163 local_irq_save(flags);
3164 if (dolock)
3165 spin_lock(&cpu_buffer->reader_lock);
d8eeb2d3 3166 event = rb_buffer_peek(cpu_buffer, ts);
469535a5
RR
3167 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3168 rb_advance_reader(cpu_buffer);
8d707e8e
SR
3169 if (dolock)
3170 spin_unlock(&cpu_buffer->reader_lock);
3171 local_irq_restore(flags);
f83c9d0f 3172
1b959e18 3173 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 3174 goto again;
2d622719 3175
f83c9d0f
SR
3176 return event;
3177}
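/*
 * Illustrative sketch, not part of this file: peeking at the next event
 * without consuming it, e.g. to decide whether readers should be woken.
 * A later ring_buffer_consume() on the same cpu would return the event
 * seen here.  "wake_up_readers" is a made-up helper.
 *
 *	if (ring_buffer_peek(buffer, cpu, &ts))
 *		wake_up_readers();
 */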
3178
3179/**
3180 * ring_buffer_iter_peek - peek at the next event to be read
3181 * @iter: The ring buffer iterator
3182 * @ts: The timestamp counter of this event.
3183 *
3184 * This will return the event that will be read next, but does
3185 * not increment the iterator.
3186 */
3187struct ring_buffer_event *
3188ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3189{
3190 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3191 struct ring_buffer_event *event;
3192 unsigned long flags;
3193
2d622719 3194 again:
f83c9d0f
SR
3195 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3196 event = rb_iter_peek(iter, ts);
3197 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3198
1b959e18 3199 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 3200 goto again;
2d622719 3201
f83c9d0f
SR
3202 return event;
3203}
3204
7a8e76a3
SR
3205/**
3206 * ring_buffer_consume - return an event and consume it
3207 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's timestamp in
3208 *
3209 * Returns the next event in the ring buffer, and that event is consumed.
3210 * Meaning that sequential reads will keep returning a different event,
3211 * and eventually empty the ring buffer if the producer is slower.
3212 */
3213struct ring_buffer_event *
3214ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
3215{
554f786e
SR
3216 struct ring_buffer_per_cpu *cpu_buffer;
3217 struct ring_buffer_event *event = NULL;
f83c9d0f 3218 unsigned long flags;
8d707e8e
SR
3219 int dolock;
3220
3221 dolock = rb_ok_to_lock();
7a8e76a3 3222
2d622719 3223 again:
554f786e
SR
3224 /* might be called in atomic */
3225 preempt_disable();
3226
9e01c1b7 3227 if (!cpumask_test_cpu(cpu, buffer->cpumask))
554f786e 3228 goto out;
7a8e76a3 3229
554f786e 3230 cpu_buffer = buffer->buffers[cpu];
8d707e8e
SR
3231 local_irq_save(flags);
3232 if (dolock)
3233 spin_lock(&cpu_buffer->reader_lock);
f83c9d0f 3234
d8eeb2d3 3235 event = rb_buffer_peek(cpu_buffer, ts);
469535a5
RR
3236 if (event)
3237 rb_advance_reader(cpu_buffer);
7a8e76a3 3238
8d707e8e
SR
3239 if (dolock)
3240 spin_unlock(&cpu_buffer->reader_lock);
3241 local_irq_restore(flags);
f83c9d0f 3242
554f786e
SR
3243 out:
3244 preempt_enable();
3245
1b959e18 3246 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 3247 goto again;
2d622719 3248
7a8e76a3
SR
3249 return event;
3250}
c4f50183 3251EXPORT_SYMBOL_GPL(ring_buffer_consume);
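/*
 * Illustrative sketch, not part of this file: draining one CPU buffer with
 * consuming reads.  "process_event" is a made-up callback.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 */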
7a8e76a3
SR
3252
3253/**
3254 * ring_buffer_read_start - start a non consuming read of the buffer
3255 * @buffer: The ring buffer to read from
3256 * @cpu: The cpu buffer to iterate over
3257 *
3258 * This starts up an iteration through the buffer. It also disables
3259 * the recording to the buffer until the reading is finished.
3260 * This prevents the reading from being corrupted. This is not
3261 * a consuming read, so a producer is not expected.
3262 *
3263 * Must be paired with ring_buffer_read_finish.
3264 */
3265struct ring_buffer_iter *
3266ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3267{
3268 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 3269 struct ring_buffer_iter *iter;
d769041f 3270 unsigned long flags;
7a8e76a3 3271
9e01c1b7 3272 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3273 return NULL;
7a8e76a3
SR
3274
3275 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3276 if (!iter)
8aabee57 3277 return NULL;
7a8e76a3
SR
3278
3279 cpu_buffer = buffer->buffers[cpu];
3280
3281 iter->cpu_buffer = cpu_buffer;
3282
3283 atomic_inc(&cpu_buffer->record_disabled);
3284 synchronize_sched();
3285
f83c9d0f 3286 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3e03fb7f 3287 __raw_spin_lock(&cpu_buffer->lock);
642edba5 3288 rb_iter_reset(iter);
3e03fb7f 3289 __raw_spin_unlock(&cpu_buffer->lock);
f83c9d0f 3290 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3
SR
3291
3292 return iter;
3293}
c4f50183 3294EXPORT_SYMBOL_GPL(ring_buffer_read_start);
7a8e76a3
SR
3295
3296/**
3297 * ring_buffer_read_finish - finish reading the iterator of the buffer
3298 * @iter: The iterator retrieved by ring_buffer_read_start
3299 *
3300 * This re-enables the recording to the buffer, and frees the
3301 * iterator.
3302 */
3303void
3304ring_buffer_read_finish(struct ring_buffer_iter *iter)
3305{
3306 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3307
3308 atomic_dec(&cpu_buffer->record_disabled);
3309 kfree(iter);
3310}
c4f50183 3311EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
7a8e76a3
SR
3312
3313/**
3314 * ring_buffer_read - read the next item in the ring buffer by the iterator
3315 * @iter: The ring buffer iterator
3316 * @ts: The time stamp of the event read.
3317 *
3318 * This reads the next event in the ring buffer and increments the iterator.
3319 */
3320struct ring_buffer_event *
3321ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3322{
3323 struct ring_buffer_event *event;
f83c9d0f
SR
3324 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3325 unsigned long flags;
7a8e76a3 3326
f83c9d0f 3327 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7e9391cf 3328 again:
f83c9d0f 3329 event = rb_iter_peek(iter, ts);
7a8e76a3 3330 if (!event)
f83c9d0f 3331 goto out;
7a8e76a3 3332
7e9391cf
SR
3333 if (event->type_len == RINGBUF_TYPE_PADDING)
3334 goto again;
3335
7a8e76a3 3336 rb_advance_iter(iter);
f83c9d0f
SR
3337 out:
3338 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3
SR
3339
3340 return event;
3341}
c4f50183 3342EXPORT_SYMBOL_GPL(ring_buffer_read);
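/*
 * Illustrative sketch, not part of this file: a non consuming read of a
 * CPU buffer with the iterator interface above.  "process_event" is a
 * made-up callback.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */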
7a8e76a3
SR
3343
3344/**
3345 * ring_buffer_size - return the size of the ring buffer (in bytes)
3346 * @buffer: The ring buffer.
3347 */
3348unsigned long ring_buffer_size(struct ring_buffer *buffer)
3349{
3350 return BUF_PAGE_SIZE * buffer->pages;
3351}
c4f50183 3352EXPORT_SYMBOL_GPL(ring_buffer_size);
7a8e76a3
SR
3353
3354static void
3355rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3356{
77ae365e
SR
3357 rb_head_page_deactivate(cpu_buffer);
3358
7a8e76a3 3359 cpu_buffer->head_page
3adc54fa 3360 = list_entry(cpu_buffer->pages, struct buffer_page, list);
bf41a158 3361 local_set(&cpu_buffer->head_page->write, 0);
778c55d4 3362 local_set(&cpu_buffer->head_page->entries, 0);
abc9b56d 3363 local_set(&cpu_buffer->head_page->page->commit, 0);
d769041f 3364
6f807acd 3365 cpu_buffer->head_page->read = 0;
bf41a158
SR
3366
3367 cpu_buffer->tail_page = cpu_buffer->head_page;
3368 cpu_buffer->commit_page = cpu_buffer->head_page;
3369
3370 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3371 local_set(&cpu_buffer->reader_page->write, 0);
778c55d4 3372 local_set(&cpu_buffer->reader_page->entries, 0);
abc9b56d 3373 local_set(&cpu_buffer->reader_page->page->commit, 0);
6f807acd 3374 cpu_buffer->reader_page->read = 0;
7a8e76a3 3375
77ae365e
SR
3376 local_set(&cpu_buffer->commit_overrun, 0);
3377 local_set(&cpu_buffer->overrun, 0);
e4906eff 3378 local_set(&cpu_buffer->entries, 0);
fa743953
SR
3379 local_set(&cpu_buffer->committing, 0);
3380 local_set(&cpu_buffer->commits, 0);
77ae365e 3381 cpu_buffer->read = 0;
69507c06
SR
3382
3383 cpu_buffer->write_stamp = 0;
3384 cpu_buffer->read_stamp = 0;
77ae365e
SR
3385
3386 rb_head_page_activate(cpu_buffer);
7a8e76a3
SR
3387}
3388
3389/**
3390 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3391 * @buffer: The ring buffer to reset a per cpu buffer of
3392 * @cpu: The CPU buffer to be reset
3393 */
3394void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3395{
3396 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3397 unsigned long flags;
3398
9e01c1b7 3399 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3400 return;
7a8e76a3 3401
41ede23e
SR
3402 atomic_inc(&cpu_buffer->record_disabled);
3403
f83c9d0f
SR
3404 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3405
41b6a95d
SR
3406 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3407 goto out;
3408
3e03fb7f 3409 __raw_spin_lock(&cpu_buffer->lock);
7a8e76a3
SR
3410
3411 rb_reset_cpu(cpu_buffer);
3412
3e03fb7f 3413 __raw_spin_unlock(&cpu_buffer->lock);
f83c9d0f 3414
41b6a95d 3415 out:
f83c9d0f 3416 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
41ede23e
SR
3417
3418 atomic_dec(&cpu_buffer->record_disabled);
7a8e76a3 3419}
c4f50183 3420EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
7a8e76a3
SR
3421
3422/**
3423 * ring_buffer_reset - reset a ring buffer
3424 * @buffer: The ring buffer to reset all cpu buffers
3425 */
3426void ring_buffer_reset(struct ring_buffer *buffer)
3427{
7a8e76a3
SR
3428 int cpu;
3429
7a8e76a3 3430 for_each_buffer_cpu(buffer, cpu)
d769041f 3431 ring_buffer_reset_cpu(buffer, cpu);
7a8e76a3 3432}
c4f50183 3433EXPORT_SYMBOL_GPL(ring_buffer_reset);
7a8e76a3
SR
3434
3435/**
3436 * ring_buffer_empty - is the ring buffer empty?
3437 * @buffer: The ring buffer to test
3438 */
3439int ring_buffer_empty(struct ring_buffer *buffer)
3440{
3441 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 3442 unsigned long flags;
8d707e8e 3443 int dolock;
7a8e76a3 3444 int cpu;
d4788207 3445 int ret;
7a8e76a3 3446
8d707e8e 3447 dolock = rb_ok_to_lock();
7a8e76a3
SR
3448
3449 /* yes this is racy, but if you don't like the race, lock the buffer */
3450 for_each_buffer_cpu(buffer, cpu) {
3451 cpu_buffer = buffer->buffers[cpu];
8d707e8e
SR
3452 local_irq_save(flags);
3453 if (dolock)
3454 spin_lock(&cpu_buffer->reader_lock);
d4788207 3455 ret = rb_per_cpu_empty(cpu_buffer);
8d707e8e
SR
3456 if (dolock)
3457 spin_unlock(&cpu_buffer->reader_lock);
3458 local_irq_restore(flags);
3459
d4788207 3460 if (!ret)
7a8e76a3
SR
3461 return 0;
3462 }
554f786e 3463
7a8e76a3
SR
3464 return 1;
3465}
c4f50183 3466EXPORT_SYMBOL_GPL(ring_buffer_empty);
7a8e76a3
SR
3467
3468/**
3469 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
3470 * @buffer: The ring buffer
3471 * @cpu: The CPU buffer to test
3472 */
3473int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3474{
3475 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 3476 unsigned long flags;
8d707e8e 3477 int dolock;
8aabee57 3478 int ret;
7a8e76a3 3479
9e01c1b7 3480 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3481 return 1;
7a8e76a3 3482
8d707e8e
SR
3483 dolock = rb_ok_to_lock();
3484
7a8e76a3 3485 cpu_buffer = buffer->buffers[cpu];
8d707e8e
SR
3486 local_irq_save(flags);
3487 if (dolock)
3488 spin_lock(&cpu_buffer->reader_lock);
554f786e 3489 ret = rb_per_cpu_empty(cpu_buffer);
8d707e8e
SR
3490 if (dolock)
3491 spin_unlock(&cpu_buffer->reader_lock);
3492 local_irq_restore(flags);
554f786e
SR
3493
3494 return ret;
7a8e76a3 3495}
c4f50183 3496EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
7a8e76a3 3497
85bac32c 3498#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
7a8e76a3
SR
3499/**
3500 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
3501 * @buffer_a: One buffer to swap with
3502 * @buffer_b: The other buffer to swap with
3503 *
3504 * This function is useful for tracers that want to take a "snapshot"
3505 * of a CPU buffer and have another backup buffer lying around.
3506 * It is expected that the tracer handles the cpu buffer not being
3507 * used at the moment.
3508 */
3509int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3510 struct ring_buffer *buffer_b, int cpu)
3511{
3512 struct ring_buffer_per_cpu *cpu_buffer_a;
3513 struct ring_buffer_per_cpu *cpu_buffer_b;
554f786e
SR
3514 int ret = -EINVAL;
3515
9e01c1b7
RR
3516 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3517 !cpumask_test_cpu(cpu, buffer_b->cpumask))
554f786e 3518 goto out;
7a8e76a3
SR
3519
3520 /* At least make sure the two buffers are somewhat the same */
6d102bc6 3521 if (buffer_a->pages != buffer_b->pages)
554f786e
SR
3522 goto out;
3523
3524 ret = -EAGAIN;
7a8e76a3 3525
97b17efe 3526 if (ring_buffer_flags != RB_BUFFERS_ON)
554f786e 3527 goto out;
97b17efe
SR
3528
3529 if (atomic_read(&buffer_a->record_disabled))
554f786e 3530 goto out;
97b17efe
SR
3531
3532 if (atomic_read(&buffer_b->record_disabled))
554f786e 3533 goto out;
97b17efe 3534
7a8e76a3
SR
3535 cpu_buffer_a = buffer_a->buffers[cpu];
3536 cpu_buffer_b = buffer_b->buffers[cpu];
3537
97b17efe 3538 if (atomic_read(&cpu_buffer_a->record_disabled))
554f786e 3539 goto out;
97b17efe
SR
3540
3541 if (atomic_read(&cpu_buffer_b->record_disabled))
554f786e 3542 goto out;
97b17efe 3543
7a8e76a3
SR
3544 /*
3545 * We can't do a synchronize_sched here because this
3546 * function can be called in atomic context.
3547 * Normally this will be called from the same CPU as cpu.
3548 * If not it's up to the caller to protect this.
3549 */
3550 atomic_inc(&cpu_buffer_a->record_disabled);
3551 atomic_inc(&cpu_buffer_b->record_disabled);
3552
98277991
SR
3553 ret = -EBUSY;
3554 if (local_read(&cpu_buffer_a->committing))
3555 goto out_dec;
3556 if (local_read(&cpu_buffer_b->committing))
3557 goto out_dec;
3558
7a8e76a3
SR
3559 buffer_a->buffers[cpu] = cpu_buffer_b;
3560 buffer_b->buffers[cpu] = cpu_buffer_a;
3561
3562 cpu_buffer_b->buffer = buffer_a;
3563 cpu_buffer_a->buffer = buffer_b;
3564
98277991
SR
3565 ret = 0;
3566
3567out_dec:
7a8e76a3
SR
3568 atomic_dec(&cpu_buffer_a->record_disabled);
3569 atomic_dec(&cpu_buffer_b->record_disabled);
554f786e 3570out:
554f786e 3571 return ret;
7a8e76a3 3572}
c4f50183 3573EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
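/*
 * Illustrative sketch, not part of this file: taking a per-cpu "snapshot"
 * by swapping the live buffer with a spare one, as the comment above
 * describes.  "live", "spare" and "read_snapshot" are made-up names.
 *
 *	if (ring_buffer_swap_cpu(live, spare, cpu) == 0)
 *		read_snapshot(spare, cpu);	// spare now holds the old data
 */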
85bac32c 3574#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
7a8e76a3 3575
8789a9e7
SR
3576/**
3577 * ring_buffer_alloc_read_page - allocate a page to read from buffer
3578 * @buffer: the buffer to allocate for.
3579 *
3580 * This function is used in conjunction with ring_buffer_read_page.
3581 * When reading a full page from the ring buffer, these functions
3582 * can be used to speed up the process. The calling function should
3583 * allocate a few pages first with this function. Then when it
3584 * needs to get pages from the ring buffer, it passes the result
3585 * of this function into ring_buffer_read_page, which will swap
3586 * the page that was allocated, with the read page of the buffer.
3587 *
3588 * Returns:
3589 * The page allocated, or NULL on error.
3590 */
3591void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
3592{
044fa782 3593 struct buffer_data_page *bpage;
ef7a4a16 3594 unsigned long addr;
8789a9e7
SR
3595
3596 addr = __get_free_page(GFP_KERNEL);
3597 if (!addr)
3598 return NULL;
3599
044fa782 3600 bpage = (void *)addr;
8789a9e7 3601
ef7a4a16
SR
3602 rb_init_page(bpage);
3603
044fa782 3604 return bpage;
8789a9e7 3605}
d6ce96da 3606EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
8789a9e7
SR
3607
3608/**
3609 * ring_buffer_free_read_page - free an allocated read page
3610 * @buffer: the buffer the page was allocated for
3611 * @data: the page to free
3612 *
3613 * Free a page allocated from ring_buffer_alloc_read_page.
3614 */
3615void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3616{
3617 free_page((unsigned long)data);
3618}
d6ce96da 3619EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
8789a9e7
SR
3620
3621/**
3622 * ring_buffer_read_page - extract a page from the ring buffer
3623 * @buffer: buffer to extract from
3624 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
ef7a4a16 3625 * @len: amount to extract
8789a9e7
SR
3626 * @cpu: the cpu of the buffer to extract
3627 * @full: should the extraction only happen when the page is full.
3628 *
3629 * This function will pull out a page from the ring buffer and consume it.
3630 * @data_page must be the address of the variable holding the page returned
3631 * from ring_buffer_alloc_read_page. This is because the page might be used
3632 * to swap with a page in the ring buffer.
3633 *
3634 * for example:
b85fa01e 3635 * rpage = ring_buffer_alloc_read_page(buffer);
8789a9e7
SR
3636 * if (!rpage)
3637 * return error;
ef7a4a16 3638 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
667d2412
LJ
3639 * if (ret >= 0)
3640 * process_page(rpage, ret);
8789a9e7
SR
3641 *
3642 * When @full is set, the function will not return any data unless
3643 * the writer is off the reader page.
3644 *
3645 * Note: it is up to the calling functions to handle sleeps and wakeups.
3646 * The ring buffer can be used anywhere in the kernel and cannot
3647 * blindly call wake_up. The layer that uses the ring buffer must be
3648 * responsible for that.
3649 *
3650 * Returns:
667d2412
LJ
3651 * >=0 if data has been transferred, returns the offset of consumed data.
3652 * <0 if no data has been transferred.
8789a9e7
SR
3653 */
3654int ring_buffer_read_page(struct ring_buffer *buffer,
ef7a4a16 3655 void **data_page, size_t len, int cpu, int full)
8789a9e7
SR
3656{
3657 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3658 struct ring_buffer_event *event;
044fa782 3659 struct buffer_data_page *bpage;
ef7a4a16 3660 struct buffer_page *reader;
8789a9e7 3661 unsigned long flags;
ef7a4a16 3662 unsigned int commit;
667d2412 3663 unsigned int read;
4f3640f8 3664 u64 save_timestamp;
667d2412 3665 int ret = -1;
8789a9e7 3666
554f786e
SR
3667 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3668 goto out;
3669
474d32b6
SR
3670 /*
3671 * If len is not big enough to hold the page header, then
3672 * we can not copy anything.
3673 */
3674 if (len <= BUF_PAGE_HDR_SIZE)
554f786e 3675 goto out;
474d32b6
SR
3676
3677 len -= BUF_PAGE_HDR_SIZE;
3678
8789a9e7 3679 if (!data_page)
554f786e 3680 goto out;
8789a9e7 3681
044fa782
SR
3682 bpage = *data_page;
3683 if (!bpage)
554f786e 3684 goto out;
8789a9e7
SR
3685
3686 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3687
ef7a4a16
SR
3688 reader = rb_get_reader_page(cpu_buffer);
3689 if (!reader)
554f786e 3690 goto out_unlock;
8789a9e7 3691
ef7a4a16
SR
3692 event = rb_reader_event(cpu_buffer);
3693
3694 read = reader->read;
3695 commit = rb_page_commit(reader);
667d2412 3696
8789a9e7 3697 /*
474d32b6
SR
3698	 * If this page has been partially read, or
3699	 * if len is not big enough to read the rest of the page, or
3700 * a writer is still on the page, then
3701 * we must copy the data from the page to the buffer.
3702 * Otherwise, we can simply swap the page with the one passed in.
8789a9e7 3703 */
474d32b6 3704 if (read || (len < (commit - read)) ||
ef7a4a16 3705 cpu_buffer->reader_page == cpu_buffer->commit_page) {
667d2412 3706 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
474d32b6
SR
3707 unsigned int rpos = read;
3708 unsigned int pos = 0;
ef7a4a16 3709 unsigned int size;
8789a9e7
SR
3710
3711 if (full)
554f786e 3712 goto out_unlock;
8789a9e7 3713
ef7a4a16
SR
3714 if (len > (commit - read))
3715 len = (commit - read);
3716
3717 size = rb_event_length(event);
3718
3719 if (len < size)
554f786e 3720 goto out_unlock;
ef7a4a16 3721
4f3640f8
SR
3722 /* save the current timestamp, since the user will need it */
3723 save_timestamp = cpu_buffer->read_stamp;
3724
ef7a4a16
SR
3725 /* Need to copy one event at a time */
3726 do {
474d32b6 3727 memcpy(bpage->data + pos, rpage->data + rpos, size);
ef7a4a16
SR
3728
3729 len -= size;
3730
3731 rb_advance_reader(cpu_buffer);
474d32b6
SR
3732 rpos = reader->read;
3733 pos += size;
ef7a4a16
SR
3734
3735 event = rb_reader_event(cpu_buffer);
3736 size = rb_event_length(event);
3737 } while (len > size);
667d2412
LJ
3738
3739 /* update bpage */
ef7a4a16 3740 local_set(&bpage->commit, pos);
4f3640f8 3741 bpage->time_stamp = save_timestamp;
ef7a4a16 3742
474d32b6
SR
3743 /* we copied everything to the beginning */
3744 read = 0;
8789a9e7 3745 } else {
afbab76a 3746 /* update the entry counter */
77ae365e 3747 cpu_buffer->read += rb_page_entries(reader);
afbab76a 3748
8789a9e7 3749 /* swap the pages */
044fa782 3750 rb_init_page(bpage);
ef7a4a16
SR
3751 bpage = reader->page;
3752 reader->page = *data_page;
3753 local_set(&reader->write, 0);
778c55d4 3754 local_set(&reader->entries, 0);
ef7a4a16 3755 reader->read = 0;
044fa782 3756 *data_page = bpage;
8789a9e7 3757 }
667d2412 3758 ret = read;
8789a9e7 3759
554f786e 3760 out_unlock:
8789a9e7
SR
3761 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3762
554f786e 3763 out:
8789a9e7
SR
3764 return ret;
3765}
d6ce96da 3766EXPORT_SYMBOL_GPL(ring_buffer_read_page);
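/*
 * Editor's note: a self-contained sketch, not part of the original file,
 * showing the intended pairing of ring_buffer_alloc_read_page(),
 * ring_buffer_read_page() and ring_buffer_free_read_page().  The
 * drain_cpu_buffer() name and the process_page() callback are hypothetical.
 */
static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu,
			     void (*process_page)(void *page, int offset))
{
	void *page = ring_buffer_alloc_read_page(buffer);
	int ret;

	if (!page)
		return;

	/* Pull page-sized chunks until no more data is transferred. */
	while ((ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE,
					    cpu, 0)) >= 0)
		process_page(page, ret);

	/* @page may have been swapped with a buffer page; free whichever. */
	ring_buffer_free_read_page(buffer, page);
}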
8789a9e7 3767
1155de47 3768#ifdef CONFIG_TRACING
a3583244
SR
3769static ssize_t
3770rb_simple_read(struct file *filp, char __user *ubuf,
3771 size_t cnt, loff_t *ppos)
3772{
5e39841c 3773 unsigned long *p = filp->private_data;
a3583244
SR
3774 char buf[64];
3775 int r;
3776
033601a3
SR
3777 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3778 r = sprintf(buf, "permanently disabled\n");
3779 else
3780 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
a3583244
SR
3781
3782 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3783}
3784
3785static ssize_t
3786rb_simple_write(struct file *filp, const char __user *ubuf,
3787 size_t cnt, loff_t *ppos)
3788{
5e39841c 3789 unsigned long *p = filp->private_data;
a3583244 3790 char buf[64];
5e39841c 3791 unsigned long val;
a3583244
SR
3792 int ret;
3793
3794 if (cnt >= sizeof(buf))
3795 return -EINVAL;
3796
3797 if (copy_from_user(&buf, ubuf, cnt))
3798 return -EFAULT;
3799
3800 buf[cnt] = 0;
3801
3802 ret = strict_strtoul(buf, 10, &val);
3803 if (ret < 0)
3804 return ret;
3805
033601a3
SR
3806 if (val)
3807 set_bit(RB_BUFFERS_ON_BIT, p);
3808 else
3809 clear_bit(RB_BUFFERS_ON_BIT, p);
a3583244
SR
3810
3811 (*ppos)++;
3812
3813 return cnt;
3814}
3815
5e2336a0 3816static const struct file_operations rb_simple_fops = {
a3583244
SR
3817 .open = tracing_open_generic,
3818 .read = rb_simple_read,
3819 .write = rb_simple_write,
3820};
3821
3822
3823static __init int rb_init_debugfs(void)
3824{
3825 struct dentry *d_tracer;
a3583244
SR
3826
3827 d_tracer = tracing_init_dentry();
3828
5452af66
FW
3829 trace_create_file("tracing_on", 0644, d_tracer,
3830 &ring_buffer_flags, &rb_simple_fops);
a3583244
SR
3831
3832 return 0;
3833}
3834
3835fs_initcall(rb_init_debugfs);
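/*
 * Editor's note: an illustrative userspace-side sketch, not part of this
 * kernel file, of driving the "tracing_on" control file created above.
 * The path assumes debugfs is mounted at /sys/kernel/debug and that
 * tracing_init_dentry() put the file in the "tracing" directory; the
 * set_ring_buffers_on() helper is hypothetical.
 */
#if 0	/* userspace example only, never compiled into the kernel */
#include <fcntl.h>
#include <unistd.h>

static int set_ring_buffers_on(int on)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

	if (fd < 0)
		return -1;
	/* rb_simple_write() parses the value with strict_strtoul(). */
	if (write(fd, on ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif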
1155de47 3836#endif
554f786e 3837
59222efe 3838#ifdef CONFIG_HOTPLUG_CPU
09c9e84d
FW
3839static int rb_cpu_notify(struct notifier_block *self,
3840 unsigned long action, void *hcpu)
554f786e
SR
3841{
3842 struct ring_buffer *buffer =
3843 container_of(self, struct ring_buffer, cpu_notify);
3844 long cpu = (long)hcpu;
3845
3846 switch (action) {
3847 case CPU_UP_PREPARE:
3848 case CPU_UP_PREPARE_FROZEN:
3f237a79 3849 if (cpumask_test_cpu(cpu, buffer->cpumask))
554f786e
SR
3850 return NOTIFY_OK;
3851
3852 buffer->buffers[cpu] =
3853 rb_allocate_cpu_buffer(buffer, cpu);
3854 if (!buffer->buffers[cpu]) {
3855 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3856 cpu);
3857 return NOTIFY_OK;
3858 }
3859 smp_wmb();
3f237a79 3860 cpumask_set_cpu(cpu, buffer->cpumask);
554f786e
SR
3861 break;
3862 case CPU_DOWN_PREPARE:
3863 case CPU_DOWN_PREPARE_FROZEN:
3864 /*
3865 * Do nothing.
3866 * If we were to free the buffer, then the user would
3867 * lose any trace that was in the buffer.
3868 */
3869 break;
3870 default:
3871 break;
3872 }
3873 return NOTIFY_OK;
3874}
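/*
 * Editor's note: a minimal sketch, not part of the original file, of how a
 * notifier like rb_cpu_notify() is typically wired up when the buffer is
 * created (in this file that is done while the buffer is allocated).  The
 * setup_hotplug_notifier() helper name is hypothetical.
 */
static void setup_hotplug_notifier(struct ring_buffer *buffer)
{
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
}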
3875#endif