/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ftrace_event.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>

#include <asm/local.h>
static void update_pages_handler(struct work_struct *work);
/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_puts(s, "# compressed entry header\n");
	ret = trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_puts(s, "\tarray       :   32 bits\n");
	ret = trace_seq_putc(s, '\n');
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
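
/*
 * Illustrative sketch (not part of the original file): how the compressed
 * entry header printed above is typically interpreted for a data event.
 * The helper name is hypothetical; 4 is the event alignment (RB_ALIGNMENT,
 * defined further down).
 */
static inline unsigned int example_event_size(struct ring_buffer_event *event)
{
	/* type_len 1..28 encodes the data length in 4-byte units */
	if (event->type_len)
		return event->type_len * 4;
	/* type_len == 0: the length is stored in the first array slot */
	return event->array[0];
}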
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * (ASCII art omitted: the diagrams showed the reader page being linked into
 *  the ring buffer in place of the head page, which then becomes the new
 *  reader page.)
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 */
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on, makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer:
 *
 *  1) This global flag must be set.
 *  2) The ring buffer must be enabled for recording.
 *  3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */
/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0      : ring buffers are off
 *   1      0      : ring buffers are on
 *   X      1      : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
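
/*
 * Illustrative sketch (not part of the original file): how a write path
 * could consult the global flags above before recording. The helper name
 * is hypothetical; the real reserve path performs an equivalent check.
 */
static inline bool example_buffers_writable(void)
{
	/* ON must be set and DISABLED must be clear */
	return ring_buffer_flags == RB_BUFFERS_ON;
}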
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}
static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}
/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}
/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
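
/*
 * Illustrative usage sketch (not part of the original file): a consumer
 * that already holds an event would typically pair the two accessors
 * above. The wrapper itself is hypothetical.
 */
static inline void example_copy_event_payload(struct ring_buffer_event *event,
					      void *buf, unsigned buflen)
{
	unsigned len = ring_buffer_event_length(event);

	if (len > buflen)
		len = buflen;
	memcpy(buf, ring_buffer_event_data(event), len);
}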
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)
struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};
/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};
/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
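
/*
 * Illustrative sketch (not part of the original file): splitting the
 * "write" field described above into its two parts. The helper names are
 * hypothetical.
 */
static inline unsigned long example_write_index(unsigned long write)
{
	return write & RB_WRITE_MASK;	/* byte index on the page */
}

static inline unsigned long example_write_updaters(unsigned long write)
{
	return write >> 20;		/* count of in-flight updaters */
}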
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}
/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}
/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}
/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
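
/*
 * Worked example (illustrative, not part of the original file): on a 64-bit
 * arch with 4096-byte pages, the buffer_data_page header is
 * 8 (time_stamp) + 8 (commit) = 16 bytes, so BUF_PAGE_SIZE is 4080 and
 * BUF_MAX_DATA_SIZE is 4080 - 8 = 4072 bytes.
 */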
int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}
struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	bool				waiters_pending;
};
/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};
struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
};
struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
}
/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worse
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. It's OK for spurious wake ups.
	 */
	work->waiters_pending = true;

	if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
		schedule();

	finish_wait(&work->waiters, &wait);

	return 0;
}
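
/*
 * Illustrative usage sketch (not part of the original file): a reader
 * thread blocking until a specific cpu buffer has data. The loop is
 * hypothetical; ring_buffer_empty_cpu() is declared in
 * <linux/ring_buffer.h>.
 */
static inline int example_wait_for_data(struct ring_buffer *buffer, int cpu)
{
	while (ring_buffer_empty_cpu(buffer, cpu)) {
		int ret = ring_buffer_wait(buffer, cpu);

		if (ret < 0)
			return ret;
	}
	return 0;
}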
/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	work->waiters_pending = true;
	poll_wait(filp, &work->waiters, poll_table);

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return POLLIN | POLLRDNORM;
	return 0;
}
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                               -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * (ASCII diagram omitted: it showed the reader swapping its page in while
 *  a writer, already on the page the reader just added, is about to move
 *  the head forward.  Key: ---X--> HEAD flag set in pointer)
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}
/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}
/*
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}
/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}
/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}
static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}
/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}
static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}
static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}
static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}
/*
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}
/*
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}
static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	struct buffer_page *bpage, *tmp;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_NORETRY flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is
		 * not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL | __GFP_NORETRY,
				     cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     int nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif
/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu, nr_pages;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_begin();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	__register_cpu_notifier(&buffer->cpu_notify);
	cpu_notifier_register_done();
#endif

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
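
/*
 * Illustrative usage sketch (not part of the original file):
 * ring_buffer_alloc() is the wrapper macro from <linux/ring_buffer.h>
 * that supplies the lock class key for __ring_buffer_alloc(). The size
 * here is arbitrary.
 */
static inline struct ring_buffer *example_alloc_overwrite_buffer(void)
{
	/* one megabyte per cpu, old data overwritten when the buffer wraps */
	return ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
}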
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}
static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned int nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * next page
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
					       head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						   struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
			       list);
	tmp_iter_page = first_page;

	do {
		to_remove_page = tmp_iter_page;
		rb_inc_page(cpu_buffer, &tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}
static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    start of new pages list.
	 * 3. Finally, we update the head->prev to the end of new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;

		head_page = &rb_set_head_page(cpu_buffer)->list;
		if (!head_page)
			break;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update to head page's prev
			 * pointer to point to end of list
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					  -cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}
static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 * @cpu_id: the cpu buffer to resize
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns 0 on success and < 0 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
		       int cpu_id)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages;
	int cpu, err = 0;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	/* Make sure the requested buffer exists */
	if (cpu_id != RING_BUFFER_ALL_CPUS &&
	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/*
	 * Don't succeed if resizing is disabled, as a reader might be
	 * manipulating the ring buffer and is expecting a sane state while
	 * this is true.
	 */
	if (atomic_read(&buffer->resize_disabled))
		return -EBUSY;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* calculate the pages to update */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];

			cpu_buffer->nr_pages_to_update = nr_pages -
							 cpu_buffer->nr_pages;
			/*
			 * nothing more to do for removing pages or no update
			 */
			if (cpu_buffer->nr_pages_to_update <= 0)
				continue;
			/*
			 * to add pages, make sure all new pages can be
			 * allocated without receiving ENOMEM
			 */
			INIT_LIST_HEAD(&cpu_buffer->new_pages);
			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
						&cpu_buffer->new_pages, cpu)) {
				/* not enough memory for new pages */
				err = -ENOMEM;
				goto out_err;
			}
		}

		/*
		 * Fire off all the required work handlers
		 * We can't schedule on offline CPUs, but it's not necessary
		 * since we can change their buffer sizes without any race.
		 */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			/* The update must run on the CPU that is being updated. */
			if (cpu == smp_processor_id() || !cpu_online(cpu)) {
				rb_update_pages(cpu_buffer);
				cpu_buffer->nr_pages_to_update = 0;
			} else {
				/*
				 * Can not disable preemption for
				 * schedule_work_on().
				 */
				schedule_work_on(cpu,
						 &cpu_buffer->update_pages_work);
			}
		}

		/* wait for all the updates to complete */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			if (cpu_online(cpu))
				wait_for_completion(&cpu_buffer->update_done);
			cpu_buffer->nr_pages_to_update = 0;
		}
	} else {
		/* Make sure this CPU has been initialized */
		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
			goto out;

		cpu_buffer = buffer->buffers[cpu_id];

		if (nr_pages == cpu_buffer->nr_pages)
			goto out;

		cpu_buffer->nr_pages_to_update = nr_pages -
						 cpu_buffer->nr_pages;

		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (cpu_buffer->nr_pages_to_update > 0 &&
		    __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
					&cpu_buffer->new_pages, cpu_id)) {
			err = -ENOMEM;
			goto out_err;
		}

		/* The update must run on the CPU that is being updated. */
		if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
			rb_update_pages(cpu_buffer);
		else {
			/*
			 * Can not disable preemption for
			 * schedule_work_on().
			 */
			schedule_work_on(cpu_id,
					 &cpu_buffer->update_pages_work);
			wait_for_completion(&cpu_buffer->update_done);
		}

		cpu_buffer->nr_pages_to_update = 0;
	}

 out:
	/*
	 * The ring buffer resize can happen with the ring buffer
	 * enabled, so that the update disturbs the tracing as little
	 * as possible. But if the buffer is disabled, we do not need
	 * to worry about that, and we can take the time to verify
	 * that the buffer is not corrupt.
	 */
	if (atomic_read(&buffer->record_disabled)) {
		atomic_inc(&buffer->record_disabled);
		/*
		 * Even though the buffer was disabled, we must make sure
		 * that it is truly disabled before calling rb_check_pages.
		 * There could have been a race between checking
		 * record_disable and incrementing it.
		 */
		synchronize_sched();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
		}
		atomic_dec(&buffer->record_disabled);
	}

	mutex_unlock(&buffer->mutex);
	return size;

 out_err:
	for_each_buffer_cpu(buffer, cpu) {
		struct buffer_page *bpage, *tmp;

		cpu_buffer = buffer->buffers[cpu];
		cpu_buffer->nr_pages_to_update = 0;

		if (list_empty(&cpu_buffer->new_pages))
			continue;

		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	mutex_unlock(&buffer->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
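
/*
 * Illustrative usage sketch (not part of the original file): growing every
 * per-cpu buffer to two megabytes. The requested size is rounded up to
 * whole pages internally and a negative value is returned on failure.
 */
static inline int example_grow_all_cpus(struct ring_buffer *buffer)
{
	return ring_buffer_resize(buffer, 2 << 20, RING_BUFFER_ALL_CPUS);
}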
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}
static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}
static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}
static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->nr_pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}
static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}
/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page? */
	if (rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}
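
/*
 * Illustrative sketch (not part of the original file): how a delta too
 * large for the 27-bit time_delta field is split by the time-extend event
 * above. The helper name is hypothetical.
 */
static inline void example_split_delta(u64 delta, u32 *low, u32 *high)
{
	*low  = delta & TS_MASK;	/* bottom TS_SHIFT (27) bits */
	*high = delta >> TS_SHIFT;	/* remaining bits */
}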
/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event, unsigned length,
		int add_timestamp, u64 delta)
{
	/* Only a commit updates the timestamp */
	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
		delta = 0;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(add_timestamp)) {
		event = rb_add_time_stamp(event, delta);
		length -= RB_LEN_TIME_EXTEND;
		delta = 0;
	}

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}
/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 0;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * otherwise we are an interrupt, and only
	 * want the outer most commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved passed next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outer most commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      new_head,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ARCH_ALIGNMENT);

	return length;
}
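
/*
 * Worked example (illustrative, not part of the original file), assuming
 * RB_FORCE_8BYTE_ALIGNMENT == 0: a 12-byte payload needs
 * 12 + RB_EVNT_HDR_SIZE (4) = 16 bytes on the page, already a multiple of
 * RB_ARCH_ALIGNMENT (4), and rb_update_event() will later encode it as
 * type_len = DIV_ROUND_UP(12, 4) = 3.
 */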
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/* account for padding bytes */
	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);

	/*
	 * Save the original length to the meta data.
	 * This will be used by the reader to add lost event
	 * counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}
2270 * This is the slow path, force gcc not to inline it.
2272 static noinline struct ring_buffer_event *
2273 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2274 	     unsigned long length, unsigned long tail,
2275 	     struct buffer_page *tail_page, u64 ts)
2277 struct buffer_page *commit_page = cpu_buffer->commit_page;
2278 struct ring_buffer *buffer = cpu_buffer->buffer;
2279 struct buffer_page *next_page;
2282 next_page = tail_page;
2284 rb_inc_page(cpu_buffer, &next_page);
2287 * If for some reason, we had an interrupt storm that made
2288 * it all the way around the buffer, bail, and warn
2291 if (unlikely(next_page == commit_page)) {
2292 local_inc(&cpu_buffer->commit_overrun);
2297 * This is where the fun begins!
2299 * We are fighting against races between a reader that
2300 * could be on another CPU trying to swap its reader
2301 * page with the buffer head.
2303 * We are also fighting against interrupts coming in and
2304 * moving the head or tail on us as well.
2306 * If the next page is the head page then we have filled
2307 * the buffer, unless the commit page is still on the
2310 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2313 * If the commit is not on the reader page, then
2314 * move the header page.
2316 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2318 * If we are not in overwrite mode,
2319 * this is easy, just stop here.
2321 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2322 local_inc(&cpu_buffer->dropped_events);
2326 ret = rb_handle_head_page(cpu_buffer,
2335 * We need to be careful here too. The
2336 * commit page could still be on the reader
2337 * page. We could have a small buffer, and
2338 * have filled up the buffer with events
2339 * from interrupts and such, and wrapped.
2341 * Note, if the tail page is also the on the
2342 * reader_page, we let it move out.
2344 if (unlikely((cpu_buffer->commit_page !=
2345 	      cpu_buffer->tail_page) &&
2346 	     (cpu_buffer->commit_page ==
2347 	      cpu_buffer->reader_page))) {
2348 local_inc(&cpu_buffer->commit_overrun);
2354 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2357 * Nested commits always have zero deltas, so
2358 * just reread the time stamp
2360 ts = rb_time_stamp(buffer);
2361 next_page->page->time_stamp = ts;
2366 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2368 /* fail and let the caller try again */
2369 return ERR_PTR(-EAGAIN);
2373 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2378 static struct ring_buffer_event *
2379 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2380 		  unsigned long length, u64 ts,
2381 		  u64 delta, int add_timestamp)
2383 struct buffer_page *tail_page;
2384 struct ring_buffer_event *event;
2385 unsigned long tail, write;
2388 * If the time delta since the last event is too big to
2389 * hold in the time field of the event, then we append a
2390 * TIME EXTEND event ahead of the data event.
2392 if (unlikely(add_timestamp))
2393 	length += RB_LEN_TIME_EXTEND;
2395 tail_page = cpu_buffer->tail_page;
2396 write = local_add_return(length, &tail_page->write);
2398 /* set write to only the index of the write */
2399 write &= RB_WRITE_MASK;
2400 tail = write - length;
2403 * If this is the first commit on the page, then it has the same
2404 * timestamp as the page itself.
2409 /* See if we shot past the end of this buffer page */
2410 if (unlikely(write > BUF_PAGE_SIZE))
2411 	return rb_move_tail(cpu_buffer, length, tail,
2414 /* We reserved something on the buffer */
2416 event = __rb_page_index(tail_page, tail);
2417 kmemcheck_annotate_bitfield(event, bitfield);
2418 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2420 local_inc(&tail_page->entries);
2423 * If this is the first commit on the page, then update
2427 tail_page->page->time_stamp = ts;
2429 /* account for these added bytes */
2430 local_add(length, &cpu_buffer->entries_bytes);
2436 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2437 		  struct ring_buffer_event *event)
2439 unsigned long new_index, old_index;
2440 struct buffer_page *bpage;
2441 unsigned long index;
2444 new_index = rb_event_index(event);
2445 old_index = new_index + rb_event_ts_length(event);
2446 addr = (unsigned long)event;
2449 bpage = cpu_buffer->tail_page;
2451 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2452 unsigned long write_mask =
2453 	local_read(&bpage->write) & ~RB_WRITE_MASK;
2454 unsigned long event_length = rb_event_length(event);
2456 * This is on the tail page. It is possible that
2457 * a write could come in and move the tail page
2458 * and write to the next page. That is fine
2459 * because we just shorten what is on this page.
2461 old_index += write_mask;
2462 new_index += write_mask;
2463 index = local_cmpxchg(&bpage->write, old_index, new_index);
2464 if (index == old_index) {
2465 /* update counters */
2466 local_sub(event_length, &cpu_buffer->entries_bytes);
2471 /* could not discard */
2475 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2477 local_inc(&cpu_buffer->committing);
2478 local_inc(&cpu_buffer->commits);
2481 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2483 unsigned long commits;
2485 if (RB_WARN_ON(cpu_buffer,
2486 	       !local_read(&cpu_buffer->committing)))
2490 commits = local_read(&cpu_buffer->commits);
2491 /* synchronize with interrupts */
2493 if (local_read(&cpu_buffer->committing) == 1)
2494 	rb_set_commit_to_write(cpu_buffer);
2496 local_dec(&cpu_buffer->committing);
2498 /* synchronize with interrupts */
2502 * Need to account for interrupts coming in between the
2503 * updating of the commit page and the clearing of the
2504 * committing counter.
2506 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2507     !local_read(&cpu_buffer->committing)) {
2508 local_inc(&cpu_buffer->committing);
2513 static struct ring_buffer_event *
2514 rb_reserve_next_event(struct ring_buffer *buffer,
2515 		      struct ring_buffer_per_cpu *cpu_buffer,
2516 		      unsigned long length)
2518 struct ring_buffer_event *event;
2524 rb_start_commit(cpu_buffer);
2526 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2528 * Due to the ability to swap a cpu buffer from a buffer
2529 * it is possible it was swapped before we committed.
2530 * (committing stops a swap). We check for it here and
2531 * if it happened, we have to fail the write.
2534 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2535 local_dec(&cpu_buffer->committing);
2536 local_dec(&cpu_buffer->commits);
2541 length = rb_calculate_event_length(length);
2547 * We allow for interrupts to reenter here and do a trace.
2548 * If one does, it will cause this original code to loop
2549 * back here. Even with heavy interrupts happening, this
2550 * should only happen a few times in a row. If this happens
2551 * 1000 times in a row, there must be either an interrupt
2552 * storm or we have something buggy.
2555 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2558 ts = rb_time_stamp(cpu_buffer->buffer);
2559 diff = ts - cpu_buffer->write_stamp;
2561 /* make sure this diff is calculated here */
2564 /* Did the write stamp get updated already? */
2565 if (likely(ts >= cpu_buffer->write_stamp)) {
2567 if (unlikely(test_time_stamp(delta))) {
2568 int local_clock_stable = 1;
2569 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2570 local_clock_stable = sched_clock_stable();
2572 WARN_ONCE(delta > (1ULL << 59),
2573 	  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2574 	  (unsigned long long)delta,
2575 	  (unsigned long long)ts,
2576 	  (unsigned long long)cpu_buffer->write_stamp,
2577 	  local_clock_stable ? "" :
2578 	  "If you just came from a suspend/resume,\n"
2579 	  "please switch to the trace global clock:\n"
2580 	  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2585 event = __rb_reserve_next(cpu_buffer, length, ts,
2586 			  delta, add_timestamp);
2587 if (unlikely(PTR_ERR(event) == -EAGAIN))
2596 rb_end_commit(cpu_buffer);
2600 #ifdef CONFIG_TRACING
2603 * The lock and unlock are done within a preempt disable section.
2604 * The current_context per_cpu variable can only be modified
2605 * by the current task between lock and unlock. But it can
2606 * be modified more than once via an interrupt. To pass this
2607 * information from the lock to the unlock without having to
2608 * access the 'in_interrupt()' functions again (which do show
2609 * a bit of overhead in something as critical as function tracing,
2610 * we use a bitmask trick.
2612 * bit 0 = NMI context
2613 * bit 1 = IRQ context
2614 * bit 2 = SoftIRQ context
2615 * bit 3 = normal context.
2617 * This works because this is the order of contexts that can
2618 * preempt other contexts. A SoftIRQ never preempts an IRQ
2621 * When the context is determined, the corresponding bit is
2622 * checked and set (if it was set, then a recursion of that context
2625 * On unlock, we need to clear this bit. To do so, just subtract
2626 * 1 from the current_context and AND it to itself.
2630 * 101 & 100 = 100 (clearing bit zero)
2633 * 1010 & 1001 = 1000 (clearing bit 1)
2635 * The least significant bit can be cleared this way, and it
2636 * just so happens that it is the same bit corresponding to
2637 * the current context.
2639 static DEFINE_PER_CPU(unsigned int, current_context);
2641 static __always_inline int trace_recursive_lock(void)
2643 unsigned int val = this_cpu_read(current_context);
2646 if (in_interrupt()) {
2656 if (unlikely(val & (1 << bit)))
2660 this_cpu_write(current_context, val);
2665 static __always_inline void trace_recursive_unlock(void)
2667 unsigned int val = this_cpu_read(current_context);
2669 val--;
2670 val &= this_cpu_read(current_context);
2671 this_cpu_write(current_context, val);
2676 #define trace_recursive_lock() (0)
2677 #define trace_recursive_unlock() do { } while (0)
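/*
 * Editor's illustration (not part of the original file): a minimal
 * userspace sketch of the "subtract 1 and AND" trick described in the
 * comment above trace_recursive_lock(). Only the arithmetic is shown;
 * the context bit numbering (NMI/IRQ/SoftIRQ/normal) is taken from that
 * comment, and the kernel per-cpu plumbing is omitted. All names below
 * (ctx_lock, ctx_unlock, CTX_*) are made up for the example.
 */
#include <assert.h>

enum { CTX_NMI = 0, CTX_IRQ = 1, CTX_SOFTIRQ = 2, CTX_NORMAL = 3 };

static unsigned int ctx_lock(unsigned int val, int bit)
{
	/* recursion in the same context would find its bit already set */
	assert(!(val & (1u << bit)));
	return val | (1u << bit);
}

static unsigned int ctx_unlock(unsigned int val)
{
	/* (val - 1) & val clears the lowest set bit, which is exactly the
	 * bit the innermost (current) context set on lock */
	return (val - 1) & val;
}

int main(void)
{
	unsigned int val = 0;

	val = ctx_lock(val, CTX_NORMAL);	/* 0b1000 */
	val = ctx_lock(val, CTX_IRQ);		/* 0b1010 */
	val = ctx_unlock(val);			/* 0b1000: IRQ bit cleared */
	val = ctx_unlock(val);			/* 0b0000: normal bit cleared */
	assert(val == 0);
	return 0;
}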
2682 * ring_buffer_lock_reserve - reserve a part of the buffer
2683 * @buffer: the ring buffer to reserve from
2684 * @length: the length of the data to reserve (excluding event header)
2686 * Returns a reserved event on the ring buffer to copy directly to.
2687 * The user of this interface will need to get the body to write into
2688 * and can use the ring_buffer_event_data() interface.
2690 * The length is the length of the data needed, not the event length
2691 * which also includes the event header.
2693 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2694 * If NULL is returned, then nothing has been allocated or locked.
2696 struct ring_buffer_event *
2697 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2699 struct ring_buffer_per_cpu *cpu_buffer;
2700 struct ring_buffer_event *event;
2703 if (ring_buffer_flags != RB_BUFFERS_ON)
2706 /* If we are tracing schedule, we don't want to recurse */
2707 preempt_disable_notrace();
2709 if (atomic_read(&buffer->record_disabled))
2712 if (trace_recursive_lock())
2715 cpu = raw_smp_processor_id();
2717 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2720 cpu_buffer = buffer->buffers[cpu];
2722 if (atomic_read(&cpu_buffer->record_disabled))
2725 if (length > BUF_MAX_DATA_SIZE)
2728 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2735 trace_recursive_unlock();
2738 preempt_enable_notrace();
2741 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
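/*
 * Editor's illustration (not part of the original file): a minimal,
 * hypothetical sketch of the reserve/commit pairing documented above,
 * assuming the caller already owns a valid 'struct ring_buffer *'.
 * The payload type 'struct my_entry' and the helper name are made up.
 */
#include <linux/ring_buffer.h>
#include <linux/errno.h>

struct my_entry {
	int value;
};

static int write_my_entry(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;

	/* length is the payload size only; the event header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;	/* nothing reserved, so nothing to commit */

	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* must be paired with the successful reserve above */
	return ring_buffer_unlock_commit(buffer, event);
}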
2744 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2745 		      struct ring_buffer_event *event)
2750 * The event first in the commit queue updates the
2753 if (rb_event_is_commit(cpu_buffer, event)) {
2755 * A commit event that is first on a page
2756 * updates the write timestamp with the page stamp
2758 if (!rb_event_index(event))
2759 	cpu_buffer->write_stamp =
2760 		cpu_buffer->commit_page->page->time_stamp;
2761 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2762 delta = event->array[0];
2764 delta += event->time_delta;
2765 cpu_buffer->write_stamp += delta;
2767 cpu_buffer->write_stamp += event->time_delta;
2771 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2772 		      struct ring_buffer_event *event)
2774 local_inc(&cpu_buffer->entries);
2775 rb_update_write_stamp(cpu_buffer, event);
2776 rb_end_commit(cpu_buffer);
2779 static __always_inline void
2780 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2782 if (buffer->irq_work.waiters_pending) {
2783 buffer->irq_work.waiters_pending = false;
2784 /* irq_work_queue() supplies its own memory barriers */
2785 irq_work_queue(&buffer->irq_work.work);
2788 if (cpu_buffer->irq_work.waiters_pending) {
2789 cpu_buffer->irq_work.waiters_pending = false;
2790 /* irq_work_queue() supplies its own memory barriers */
2791 irq_work_queue(&cpu_buffer->irq_work.work);
2796 * ring_buffer_unlock_commit - commit a reserved event
2797 * @buffer: The buffer to commit to
2798 * @event: The event pointer to commit.
2800 * This commits the data to the ring buffer, and releases any locks held.
2802 * Must be paired with ring_buffer_lock_reserve.
2804 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2805 			      struct ring_buffer_event *event)
2807 struct ring_buffer_per_cpu *cpu_buffer;
2808 int cpu = raw_smp_processor_id();
2810 cpu_buffer = buffer->buffers[cpu];
2812 rb_commit(cpu_buffer, event);
2814 rb_wakeups(buffer, cpu_buffer);
2816 trace_recursive_unlock();
2818 preempt_enable_notrace();
2822 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2824 static inline void rb_event_discard(struct ring_buffer_event *event)
2826 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2827 	event = skip_time_extend(event);
2829 /* array[0] holds the actual length for the discarded event */
2830 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2831 event->type_len = RINGBUF_TYPE_PADDING;
2832 /* time delta must be non zero */
2833 if (!event->time_delta)
2834 	event->time_delta = 1;
2838 * Decrement the entries to the page that an event is on.
2839 * The event does not even need to exist, only the pointer
2840 * to the page it is on. This may only be called before the commit
2844 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2845 		   struct ring_buffer_event *event)
2847 unsigned long addr = (unsigned long)event;
2848 struct buffer_page *bpage = cpu_buffer->commit_page;
2849 struct buffer_page *start;
2853 /* Do the likely case first */
2854 if (likely(bpage->page == (void *)addr)) {
2855 local_dec(&bpage->entries);
2860 * Because the commit page may be on the reader page we
2861 * start with the next page and check the end loop there.
2863 rb_inc_page(cpu_buffer, &bpage);
2866 if (bpage->page == (void *)addr) {
2867 local_dec(&bpage->entries);
2870 rb_inc_page(cpu_buffer, &bpage);
2871 } while (bpage != start);
2873 /* commit not part of this buffer?? */
2874 RB_WARN_ON(cpu_buffer, 1);
2878 * ring_buffer_commit_discard - discard an event that has not been committed
2879 * @buffer: the ring buffer
2880 * @event: non committed event to discard
2882 * Sometimes an event that is in the ring buffer needs to be ignored.
2883 * This function lets the user discard an event in the ring buffer
2884 * and then that event will not be read later.
2886 * This function only works if it is called before the item has been
2887 * committed. It will try to free the event from the ring buffer
2888 * if another event has not been added behind it.
2890 * If another event has been added behind it, it will set the event
2891 * up as discarded, and perform the commit.
2893 * If this function is called, do not call ring_buffer_unlock_commit on
2896 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2897 				struct ring_buffer_event *event)
2899 struct ring_buffer_per_cpu *cpu_buffer;
2902 /* The event is discarded regardless */
2903 rb_event_discard(event);
2905 cpu = smp_processor_id();
2906 cpu_buffer = buffer->buffers[cpu];
2909 * This must only be called if the event has not been
2910 * committed yet. Thus we can assume that preemption
2911 * is still disabled.
2913 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2915 rb_decrement_entry(cpu_buffer, event);
2916 if (rb_try_to_discard(cpu_buffer, event))
2920 * The commit is still visible by the reader, so we
2921 * must still update the timestamp.
2923 rb_update_write_stamp(cpu_buffer, event);
2925 rb_end_commit(cpu_buffer);
2927 trace_recursive_unlock();
2929 preempt_enable_notrace();
2932 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
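/*
 * Editor's illustration (not part of the original file): a hypothetical
 * sketch of the discard path documented above. A writer reserves space,
 * decides the data is not worth keeping (the filter condition here is
 * made up), and discards instead of committing.
 */
#include <linux/ring_buffer.h>

static void maybe_log(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	int *slot;

	event = ring_buffer_lock_reserve(buffer, sizeof(*slot));
	if (!event)
		return;

	slot = ring_buffer_event_data(event);
	*slot = value;

	if (value == 0) {
		/* drop the event; do NOT also call ring_buffer_unlock_commit */
		ring_buffer_discard_commit(buffer, event);
		return;
	}

	ring_buffer_unlock_commit(buffer, event);
}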
2935 * ring_buffer_write - write data to the buffer without reserving
2936 * @buffer: The ring buffer to write to.
2937 * @length: The length of the data being written (excluding the event header)
2938 * @data: The data to write to the buffer.
2940 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2941 * one function. If you already have the data to write to the buffer, it
2942 * may be easier to simply call this function.
2944 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2945 * and not the length of the event which would hold the header.
2947 int ring_buffer_write(struct ring_buffer *buffer,
2948 		      unsigned long length,
2949 		      void *data)
2951 struct ring_buffer_per_cpu *cpu_buffer;
2952 struct ring_buffer_event *event;
2957 if (ring_buffer_flags != RB_BUFFERS_ON)
2960 preempt_disable_notrace();
2962 if (atomic_read(&buffer->record_disabled))
2965 cpu = raw_smp_processor_id();
2967 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2970 cpu_buffer = buffer->buffers[cpu];
2972 if (atomic_read(&cpu_buffer->record_disabled))
2975 if (length > BUF_MAX_DATA_SIZE)
2978 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2982 body = rb_event_data(event);
2984 memcpy(body, data, length);
2986 rb_commit(cpu_buffer, event);
2988 rb_wakeups(buffer, cpu_buffer);
2992 preempt_enable_notrace();
2996 EXPORT_SYMBOL_GPL(ring_buffer_write);
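/*
 * Editor's illustration (not part of the original file): the one-shot
 * write helper above can replace an explicit reserve/commit pair when
 * the payload already exists. The record layout and helper name below
 * are hypothetical.
 */
#include <linux/ring_buffer.h>

struct sample {
	unsigned long seq;
	unsigned long val;
};

static int log_sample(struct ring_buffer *buffer, unsigned long seq,
		      unsigned long val)
{
	struct sample s = { .seq = seq, .val = val };

	/* length is the payload size; the event header is accounted internally */
	return ring_buffer_write(buffer, sizeof(s), &s);
}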
2998 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3000 struct buffer_page *reader = cpu_buffer->reader_page;
3001 struct buffer_page *head = rb_set_head_page(cpu_buffer);
3002 struct buffer_page *commit = cpu_buffer->commit_page;
3004 /* In case of error, head will be NULL */
3005 if (unlikely(!head))
3008 return reader->read == rb_page_commit(reader) &&
3009        (commit == reader ||
3011 	head->read == rb_page_commit(commit)));
3015 * ring_buffer_record_disable - stop all writes into the buffer
3016 * @buffer: The ring buffer to stop writes to.
3018 * This prevents all writes to the buffer. Any attempt to write
3019 * to the buffer after this will fail and return NULL.
3021 * The caller should call synchronize_sched() after this.
3023 void ring_buffer_record_disable(struct ring_buffer *buffer)
3025 atomic_inc(&buffer->record_disabled);
3027 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3030 * ring_buffer_record_enable - enable writes to the buffer
3031 * @buffer: The ring buffer to enable writes
3033 * Note, multiple disables will need the same number of enables
3034 * to truly enable the writing (much like preempt_disable).
3036 void ring_buffer_record_enable(struct ring_buffer *buffer)
3038 atomic_dec(&buffer->record_disabled);
3040 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3043 * ring_buffer_record_off - stop all writes into the buffer
3044 * @buffer: The ring buffer to stop writes to.
3046 * This prevents all writes to the buffer. Any attempt to write
3047 * to the buffer after this will fail and return NULL.
3049 * This is different than ring_buffer_record_disable() as
3050 * it works like an on/off switch, whereas the disable() version
3051 * must be paired with an enable().
3053 void ring_buffer_record_off(struct ring_buffer *buffer)
3056 unsigned int new_rd;
3059 rd = atomic_read(&buffer->record_disabled);
3060 new_rd = rd | RB_BUFFER_OFF;
3061 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3063 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3066 * ring_buffer_record_on - restart writes into the buffer
3067 * @buffer: The ring buffer to start writes to.
3069 * This enables all writes to the buffer that was disabled by
3070 * ring_buffer_record_off().
3072 * This is different than ring_buffer_record_enable() as
3073 * it works like an on/off switch, whereas the enable() version
3074 * must be paired with a disable().
3076 void ring_buffer_record_on(struct ring_buffer *buffer)
3079 unsigned int new_rd;
3082 rd = atomic_read(&buffer->record_disabled);
3083 new_rd = rd & ~RB_BUFFER_OFF;
3084 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3086 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3089 * ring_buffer_record_is_on - return true if the ring buffer can write
3090 * @buffer: The ring buffer to see if write is enabled
3092 * Returns true if the ring buffer is in a state that it accepts writes.
3094 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3096 return !atomic_read(&buffer->record_disabled);
3100 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3101 * @buffer: The ring buffer to stop writes to.
3102 * @cpu: The CPU buffer to stop
3104 * This prevents all writes to the buffer. Any attempt to write
3105 * to the buffer after this will fail and return NULL.
3107 * The caller should call synchronize_sched() after this.
3109 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3111 struct ring_buffer_per_cpu *cpu_buffer;
3113 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3116 cpu_buffer = buffer->buffers[cpu];
3117 atomic_inc(&cpu_buffer->record_disabled);
3119 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3122 * ring_buffer_record_enable_cpu - enable writes to the buffer
3123 * @buffer: The ring buffer to enable writes
3124 * @cpu: The CPU to enable.
3126 * Note, multiple disables will need the same number of enables
3127 * to truly enable the writing (much like preempt_disable).
3129 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3131 struct ring_buffer_per_cpu *cpu_buffer;
3133 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3136 cpu_buffer = buffer->buffers[cpu];
3137 atomic_dec(&cpu_buffer->record_disabled);
3139 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3142 * The total entries in the ring buffer is the running counter
3143 * of entries entered into the ring buffer, minus the sum of
3144 * the entries read from the ring buffer and the number of
3145 * entries that were overwritten.
3147 static inline unsigned long
3148 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3150 return local_read(&cpu_buffer->entries) -
3151        (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3155 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3156 * @buffer: The ring buffer
3157 * @cpu: The per CPU buffer to read from.
3159 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3161 unsigned long flags;
3162 struct ring_buffer_per_cpu *cpu_buffer;
3163 struct buffer_page *bpage;
3166 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3169 cpu_buffer = buffer->buffers[cpu];
3170 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3172 * if the tail is on reader_page, oldest time stamp is on the reader
3175 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3176 	bpage = cpu_buffer->reader_page;
3178 	bpage = rb_set_head_page(cpu_buffer);
3180 ret = bpage->page->time_stamp;
3181 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3185 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3188 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3189 * @buffer: The ring buffer
3190 * @cpu: The per CPU buffer to read from.
3192 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3194 struct ring_buffer_per_cpu *cpu_buffer;
3197 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3200 cpu_buffer = buffer->buffers[cpu];
3201 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3205 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3208 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3209 * @buffer: The ring buffer
3210 * @cpu: The per CPU buffer to get the entries from.
3212 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3214 struct ring_buffer_per_cpu *cpu_buffer;
3216 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3219 cpu_buffer = buffer->buffers[cpu];
3221 return rb_num_of_entries(cpu_buffer);
3223 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3226 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3227 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3228 * @buffer: The ring buffer
3229 * @cpu: The per CPU buffer to get the number of overruns from
3231 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3233 struct ring_buffer_per_cpu *cpu_buffer;
3236 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3239 cpu_buffer = buffer->buffers[cpu];
3240 ret = local_read(&cpu_buffer->overrun);
3244 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3247 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3248 * commits failing due to the buffer wrapping around while there are uncommitted
3249 * events, such as during an interrupt storm.
3250 * @buffer: The ring buffer
3251 * @cpu: The per CPU buffer to get the number of overruns from
3254 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3256 struct ring_buffer_per_cpu *cpu_buffer;
3259 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3262 cpu_buffer = buffer->buffers[cpu];
3263 ret = local_read(&cpu_buffer->commit_overrun);
3267 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3270 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3271 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3272 * @buffer: The ring buffer
3273 * @cpu: The per CPU buffer to get the number of overruns from
3276 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3278 struct ring_buffer_per_cpu *cpu_buffer;
3281 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3284 cpu_buffer = buffer->buffers[cpu];
3285 ret = local_read(&cpu_buffer->dropped_events);
3289 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3292 * ring_buffer_read_events_cpu - get the number of events successfully read
3293 * @buffer: The ring buffer
3294 * @cpu: The per CPU buffer to get the number of events read
3297 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3299 struct ring_buffer_per_cpu *cpu_buffer;
3301 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3304 cpu_buffer = buffer->buffers[cpu];
3305 return cpu_buffer->read;
3307 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3310 * ring_buffer_entries - get the number of entries in a buffer
3311 * @buffer: The ring buffer
3313 * Returns the total number of entries in the ring buffer
3316 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3318 struct ring_buffer_per_cpu *cpu_buffer;
3319 unsigned long entries = 0;
3322 /* if you care about this being correct, lock the buffer */
3323 for_each_buffer_cpu(buffer, cpu) {
3324 cpu_buffer = buffer->buffers[cpu];
3325 entries += rb_num_of_entries(cpu_buffer);
3330 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3333 * ring_buffer_overruns - get the number of overruns in buffer
3334 * @buffer: The ring buffer
3336 * Returns the total number of overruns in the ring buffer
3339 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3341 struct ring_buffer_per_cpu *cpu_buffer;
3342 unsigned long overruns = 0;
3345 /* if you care about this being correct, lock the buffer */
3346 for_each_buffer_cpu(buffer, cpu) {
3347 cpu_buffer = buffer->buffers[cpu];
3348 overruns += local_read(&cpu_buffer->overrun);
3353 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
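/*
 * Editor's illustration (not part of the original file): a hypothetical
 * helper that reports the per-cpu statistics exported above. pr_info()
 * is used only for the example output.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void report_cpu_stats(struct ring_buffer *buffer, int cpu)
{
	pr_info("cpu%d: %lu entries, %lu bytes, %lu overruns, %lu dropped\n",
		cpu,
		ring_buffer_entries_cpu(buffer, cpu),
		ring_buffer_bytes_cpu(buffer, cpu),
		ring_buffer_overrun_cpu(buffer, cpu),
		ring_buffer_dropped_events_cpu(buffer, cpu));
}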
3355 static void rb_iter_reset(struct ring_buffer_iter *iter)
3357 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3359 /* Iterator usage is expected to have record disabled */
3360 if (list_empty(&cpu_buffer->reader_page->list)) {
3361 iter->head_page = rb_set_head_page(cpu_buffer);
3362 if (unlikely(!iter->head_page))
3364 iter->head = iter->head_page->read;
3366 iter->head_page = cpu_buffer->reader_page;
3367 iter->head = cpu_buffer->reader_page->read;
3370 iter->read_stamp = cpu_buffer->read_stamp;
3372 iter->read_stamp = iter->head_page->page->time_stamp;
3373 iter->cache_reader_page = cpu_buffer->reader_page;
3374 iter->cache_read = cpu_buffer->read;
3378 * ring_buffer_iter_reset - reset an iterator
3379 * @iter: The iterator to reset
3381 * Resets the iterator, so that it will start from the beginning
3384 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3386 struct ring_buffer_per_cpu *cpu_buffer;
3387 unsigned long flags;
3392 cpu_buffer = iter->cpu_buffer;
3394 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3395 rb_iter_reset(iter);
3396 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3398 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3401 * ring_buffer_iter_empty - check if an iterator has no more to read
3402 * @iter: The iterator to check
3404 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3406 struct ring_buffer_per_cpu *cpu_buffer;
3408 cpu_buffer = iter->cpu_buffer;
3410 return iter->head_page == cpu_buffer->commit_page &&
3411        iter->head == rb_commit_index(cpu_buffer);
3413 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3416 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3417 		     struct ring_buffer_event *event)
3421 switch (event->type_len) {
3422 case RINGBUF_TYPE_PADDING:
3425 case RINGBUF_TYPE_TIME_EXTEND:
3426 delta = event->array[0];
3428 delta += event->time_delta;
3429 cpu_buffer->read_stamp += delta;
3432 case RINGBUF_TYPE_TIME_STAMP:
3433 /* FIXME: not implemented */
3436 case RINGBUF_TYPE_DATA:
3437 cpu_buffer->read_stamp += event->time_delta;
3447 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3448 			  struct ring_buffer_event *event)
3452 switch (event->type_len) {
3453 case RINGBUF_TYPE_PADDING:
3456 case RINGBUF_TYPE_TIME_EXTEND:
3457 delta = event->array[0];
3459 delta += event->time_delta;
3460 iter->read_stamp += delta;
3463 case RINGBUF_TYPE_TIME_STAMP:
3464 /* FIXME: not implemented */
3467 case RINGBUF_TYPE_DATA:
3468 iter->read_stamp += event->time_delta;
3477 static struct buffer_page *
3478 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3480 struct buffer_page *reader = NULL;
3481 unsigned long overwrite;
3482 unsigned long flags;
3486 local_irq_save(flags);
3487 arch_spin_lock(&cpu_buffer->lock);
3491 * This should normally only loop twice. But because the
3492 * start of the reader inserts an empty page, it causes
3493 * a case where we will loop three times. There should be no
3494 * reason to loop four times (that I know of).
3496 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3501 reader = cpu_buffer->reader_page;
3503 /* If there's more to read, return this page */
3504 if (cpu_buffer->reader_page->read < rb_page_size(reader))
3507 /* Never should we have an index greater than the size */
3508 if (RB_WARN_ON(cpu_buffer,
3509 	       cpu_buffer->reader_page->read > rb_page_size(reader)))
3512 /* check if we caught up to the tail */
3514 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3517 /* Don't bother swapping if the ring buffer is empty */
3518 if (rb_num_of_entries(cpu_buffer) == 0)
3522 * Reset the reader page to size zero.
3524 local_set(&cpu_buffer->reader_page->write, 0);
3525 local_set(&cpu_buffer->reader_page->entries, 0);
3526 local_set(&cpu_buffer->reader_page->page->commit, 0);
3527 cpu_buffer->reader_page->real_end = 0;
3531 * Splice the empty reader page into the list around the head.
3533 reader = rb_set_head_page(cpu_buffer);
3536 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3537 cpu_buffer->reader_page->list.prev = reader->list.prev;
3540 * cpu_buffer->pages just needs to point to the buffer, it
3541 * has no specific buffer page to point to. Let's move it out
3542 * of our way so we don't accidentally swap it.
3544 cpu_buffer->pages = reader->list.prev;
3546 /* The reader page will be pointing to the new head */
3547 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3550 * We want to make sure we read the overruns after we set up our
3551 * pointers to the next object. The writer side does a
3552 * cmpxchg to cross pages which acts as the mb on the writer
3553 * side. Note, the reader will constantly fail the swap
3554 * while the writer is updating the pointers, so this
3555 * guarantees that the overwrite recorded here is the one we
3556 * want to compare with the last_overrun.
3559 overwrite = local_read(&(cpu_buffer->overrun));
3562 * Here's the tricky part.
3564 * We need to move the pointer past the header page.
3565 * But we can only do that if a writer is not currently
3566 * moving it. The page before the header page has the
3567 * flag bit '1' set if it is pointing to the page we want.
3568 * but if the writer is in the process of moving it
3569 * than it will be '2' or already moved '0'.
3572 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3575 * If we did not convert it, then we must try again.
3581 * Yeah! We succeeded in replacing the page.
3583 * Now make the new head point back to the reader page.
3585 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3586 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3588 /* Finally update the reader page to the new head */
3589 cpu_buffer->reader_page = reader;
3590 rb_reset_reader_page(cpu_buffer);
3592 if (overwrite != cpu_buffer->last_overrun) {
3593 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3594 cpu_buffer->last_overrun = overwrite;
3600 arch_spin_unlock(&cpu_buffer->lock);
3601 local_irq_restore(flags);
3606 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3608 struct ring_buffer_event *event;
3609 struct buffer_page *reader;
3612 reader = rb_get_reader_page(cpu_buffer);
3614 /* This function should not be called when buffer is empty */
3615 if (RB_WARN_ON(cpu_buffer, !reader))
3618 event = rb_reader_event(cpu_buffer);
3620 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3623 rb_update_read_stamp(cpu_buffer, event);
3625 length = rb_event_length(event);
3626 cpu_buffer->reader_page->read += length;
3629 static void rb_advance_iter(struct ring_buffer_iter *iter)
3631 struct ring_buffer_per_cpu *cpu_buffer;
3632 struct ring_buffer_event *event;
3635 cpu_buffer = iter->cpu_buffer;
3638 * Check if we are at the end of the buffer.
3640 if (iter->head >= rb_page_size(iter->head_page)) {
3641 /* discarded commits can make the page empty */
3642 if (iter->head_page == cpu_buffer->commit_page)
3648 event = rb_iter_head_event(iter);
3650 length = rb_event_length(event);
3653 * This should not be called to advance the header if we are
3654 * at the tail of the buffer.
3656 if (RB_WARN_ON(cpu_buffer,
3657 	       (iter->head_page == cpu_buffer->commit_page) &&
3658 	       (iter->head + length > rb_commit_index(cpu_buffer))))
3661 rb_update_iter_read_stamp(iter, event);
3663 iter->head += length;
3665 /* check for end of page padding */
3666 if ((iter->head >= rb_page_size(iter->head_page)) &&
3667     (iter->head_page != cpu_buffer->commit_page))
3671 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3673 return cpu_buffer->lost_events;
3676 static struct ring_buffer_event *
3677 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3678 	       unsigned long *lost_events)
3680 struct ring_buffer_event *event;
3681 struct buffer_page *reader;
3686 * We repeat when a time extend is encountered.
3687 * Since the time extend is always attached to a data event,
3688 * we should never loop more than once.
3689 * (We never hit the following condition more than twice).
3691 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3694 reader = rb_get_reader_page(cpu_buffer);
3698 event = rb_reader_event(cpu_buffer);
3700 switch (event->type_len) {
3701 case RINGBUF_TYPE_PADDING:
3702 if (rb_null_event(event))
3703 	RB_WARN_ON(cpu_buffer, 1);
3705 * Because the writer could be discarding every
3706 * event it creates (which would probably be bad)
3707 * if we were to go back to "again" then we may never
3708 * catch up, and will trigger the warn on, or lock
3709 * the box. Return the padding, and we will release
3710 * the current locks, and try again.
3714 case RINGBUF_TYPE_TIME_EXTEND:
3715 /* Internal data, OK to advance */
3716 rb_advance_reader(cpu_buffer);
3719 case RINGBUF_TYPE_TIME_STAMP:
3720 /* FIXME: not implemented */
3721 rb_advance_reader(cpu_buffer);
3724 case RINGBUF_TYPE_DATA:
3726 *ts = cpu_buffer->read_stamp + event->time_delta;
3727 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3728 				 cpu_buffer->cpu, ts);
3731 *lost_events = rb_lost_events(cpu_buffer);
3740 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3742 static struct ring_buffer_event *
3743 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3745 struct ring_buffer *buffer;
3746 struct ring_buffer_per_cpu *cpu_buffer;
3747 struct ring_buffer_event *event;
3750 cpu_buffer = iter->cpu_buffer;
3751 buffer = cpu_buffer->buffer;
3754 * Check if someone performed a consuming read to
3755 * the buffer. A consuming read invalidates the iterator
3756 * and we need to reset the iterator in this case.
3758 if (unlikely(iter->cache_read != cpu_buffer->read ||
3759 	     iter->cache_reader_page != cpu_buffer->reader_page))
3760 	rb_iter_reset(iter);
3763 if (ring_buffer_iter_empty(iter))
3767 * We repeat when a time extend is encountered.
3768 * Since the time extend is always attached to a data event,
3769 * we should never loop more than once.
3770 * (We never hit the following condition more than twice).
3772 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3775 if (rb_per_cpu_empty(cpu_buffer))
3778 if (iter->head >= local_read(&iter->head_page->page->commit)) {
3783 event = rb_iter_head_event(iter);
3785 switch (event->type_len) {
3786 case RINGBUF_TYPE_PADDING:
3787 if (rb_null_event(event)) {
3791 rb_advance_iter(iter);
3794 case RINGBUF_TYPE_TIME_EXTEND:
3795 /* Internal data, OK to advance */
3796 rb_advance_iter(iter);
3799 case RINGBUF_TYPE_TIME_STAMP:
3800 /* FIXME: not implemented */
3801 rb_advance_iter(iter);
3804 case RINGBUF_TYPE_DATA:
3806 *ts = iter->read_stamp + event->time_delta;
3807 ring_buffer_normalize_time_stamp(buffer,
3808 				 cpu_buffer->cpu, ts);
3818 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3820 static inline int rb_ok_to_lock(void)
3823 * If an NMI die dumps out the content of the ring buffer
3824 * do not grab locks. We also permanently disable the ring
3825 * buffer too. A one time deal is all you get from reading
3826 * the ring buffer from an NMI.
3828 if (likely(!in_nmi()))
3831 tracing_off_permanent();
3836 * ring_buffer_peek - peek at the next event to be read
3837 * @buffer: The ring buffer to read
3838 * @cpu: The cpu to peek at
3839 * @ts: The timestamp counter of this event.
3840 * @lost_events: a variable to store if events were lost (may be NULL)
3842 * This will return the event that will be read next, but does
3843 * not consume the data.
3845 struct ring_buffer_event *
3846 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3847 		 unsigned long *lost_events)
3849 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3850 struct ring_buffer_event *event;
3851 unsigned long flags;
3854 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3857 dolock = rb_ok_to_lock();
3859 local_irq_save(flags);
3861 raw_spin_lock(&cpu_buffer->reader_lock);
3862 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3863 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3864 	rb_advance_reader(cpu_buffer);
3866 raw_spin_unlock(&cpu_buffer->reader_lock);
3867 local_irq_restore(flags);
3869 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3876 * ring_buffer_iter_peek - peek at the next event to be read
3877 * @iter: The ring buffer iterator
3878 * @ts: The timestamp counter of this event.
3880 * This will return the event that will be read next, but does
3881 * not increment the iterator.
3883 struct ring_buffer_event *
3884 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3886 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3887 struct ring_buffer_event *event;
3888 unsigned long flags;
3891 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3892 event = rb_iter_peek(iter, ts);
3893 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3895 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3902 * ring_buffer_consume - return an event and consume it
3903 * @buffer: The ring buffer to get the next event from
3904 * @cpu: the cpu to read the buffer from
3905 * @ts: a variable to store the timestamp (may be NULL)
3906 * @lost_events: a variable to store if events were lost (may be NULL)
3908 * Returns the next event in the ring buffer, and that event is consumed.
3909 * Meaning, that sequential reads will keep returning a different event,
3910 * and eventually empty the ring buffer if the producer is slower.
3912 struct ring_buffer_event *
3913 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3914 		    unsigned long *lost_events)
3916 struct ring_buffer_per_cpu *cpu_buffer;
3917 struct ring_buffer_event *event = NULL;
3918 unsigned long flags;
3921 dolock = rb_ok_to_lock();
3924 /* might be called in atomic */
3927 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3930 cpu_buffer = buffer->buffers[cpu];
3931 local_irq_save(flags);
3933 raw_spin_lock(&cpu_buffer->reader_lock);
3935 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3937 cpu_buffer->lost_events = 0;
3938 rb_advance_reader(cpu_buffer);
3942 raw_spin_unlock(&cpu_buffer->reader_lock);
3943 local_irq_restore(flags);
3948 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3953 EXPORT_SYMBOL_GPL(ring_buffer_consume);
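/*
 * Editor's illustration (not part of the original file): a hypothetical
 * consuming-read loop built on ring_buffer_consume(). The payload type
 * and drain policy are made up for the example.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	/* each successful call consumes exactly one event */
	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		int *value = ring_buffer_event_data(event);

		pr_info("cpu%d ts=%llu lost=%lu value=%d\n",
			cpu, ts, lost, *value);
	}
}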
3956 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3957 * @buffer: The ring buffer to read from
3958 * @cpu: The cpu buffer to iterate over
3960 * This performs the initial preparations necessary to iterate
3961 * through the buffer. Memory is allocated, buffer recording
3962 * is disabled, and the iterator pointer is returned to the caller.
3964 * Disabling buffer recording prevents the reading from being
3965 * corrupted. This is not a consuming read, so a producer is not
3968 * After a sequence of ring_buffer_read_prepare calls, the user is
3969 * expected to make at least one call to ring_buffer_read_prepare_sync.
3970 * Afterwards, ring_buffer_read_start is invoked to get things going
3973 * This overall must be paired with ring_buffer_read_finish.
3975 struct ring_buffer_iter *
3976 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3978 struct ring_buffer_per_cpu *cpu_buffer;
3979 struct ring_buffer_iter *iter;
3981 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3984 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3988 cpu_buffer = buffer->buffers[cpu];
3990 iter->cpu_buffer = cpu_buffer;
3992 atomic_inc(&buffer->resize_disabled);
3993 atomic_inc(&cpu_buffer->record_disabled);
3997 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4000 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4002 * All previously invoked ring_buffer_read_prepare calls to prepare
4003 * iterators will be synchronized. Afterwards, ring_buffer_read_start
4004 * calls on those iterators are allowed.
4007 ring_buffer_read_prepare_sync(void)
4009 synchronize_sched();
4011 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4014 * ring_buffer_read_start - start a non consuming read of the buffer
4015 * @iter: The iterator returned by ring_buffer_read_prepare
4017 * This finalizes the startup of an iteration through the buffer.
4018 * The iterator comes from a call to ring_buffer_read_prepare and
4019 * an intervening ring_buffer_read_prepare_sync must have been
4022 * Must be paired with ring_buffer_read_finish.
4025 ring_buffer_read_start(struct ring_buffer_iter *iter)
4027 struct ring_buffer_per_cpu *cpu_buffer;
4028 unsigned long flags;
4033 cpu_buffer = iter->cpu_buffer;
4035 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4036 arch_spin_lock(&cpu_buffer->lock);
4037 rb_iter_reset(iter);
4038 arch_spin_unlock(&cpu_buffer->lock);
4039 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4041 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4044 * ring_buffer_read_finish - finish reading the iterator of the buffer
4045 * @iter: The iterator retrieved by ring_buffer_start
4047 * This re-enables the recording to the buffer, and frees the
4051 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4053 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4054 unsigned long flags;
4057 * Ring buffer is disabled from recording, here's a good place
4058 * to check the integrity of the ring buffer.
4059 * Must prevent readers from trying to read, as the check
4060 * clears the HEAD page and readers require it.
4062 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4063 rb_check_pages(cpu_buffer);
4064 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4066 atomic_dec(&cpu_buffer->record_disabled);
4067 atomic_dec(&cpu_buffer->buffer->resize_disabled);
4070 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4073 * ring_buffer_read - read the next item in the ring buffer by the iterator
4074 * @iter: The ring buffer iterator
4075 * @ts: The time stamp of the event read.
4077 * This reads the next event in the ring buffer and increments the iterator.
4079 struct ring_buffer_event *
4080 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4082 struct ring_buffer_event *event;
4083 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4084 unsigned long flags;
4086 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4088 event = rb_iter_peek(iter, ts);
4092 if (event->type_len == RINGBUF_TYPE_PADDING)
4095 rb_advance_iter(iter);
4097 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4101 EXPORT_SYMBOL_GPL(ring_buffer_read);
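/*
 * Editor's illustration (not part of the original file): a hypothetical
 * non-consuming read pass using the prepare/sync/start ... finish
 * sequence documented above. Only one CPU is iterated for brevity.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void dump_cpu_nonconsuming(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("cpu%d ts=%llu len=%u\n",
			cpu, ts, ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}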
4104 * ring_buffer_size - return the size of the ring buffer (in bytes)
4105 * @buffer: The ring buffer.
4107 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4110 * Earlier, this method returned
4111 * BUF_PAGE_SIZE * buffer->nr_pages
4112 * Since the nr_pages field is now removed, we have converted this to
4113 * return the per cpu buffer value.
4115 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4118 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4120 EXPORT_SYMBOL_GPL(ring_buffer_size);
4123 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4125 rb_head_page_deactivate(cpu_buffer);
4127 cpu_buffer->head_page
4128 	= list_entry(cpu_buffer->pages, struct buffer_page, list);
4129 local_set(&cpu_buffer->head_page->write, 0);
4130 local_set(&cpu_buffer->head_page->entries, 0);
4131 local_set(&cpu_buffer->head_page->page->commit, 0);
4133 cpu_buffer->head_page->read = 0;
4135 cpu_buffer->tail_page = cpu_buffer->head_page;
4136 cpu_buffer->commit_page = cpu_buffer->head_page;
4138 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4139 INIT_LIST_HEAD(&cpu_buffer->new_pages);
4140 local_set(&cpu_buffer->reader_page->write, 0);
4141 local_set(&cpu_buffer->reader_page->entries, 0);
4142 local_set(&cpu_buffer->reader_page->page->commit, 0);
4143 cpu_buffer->reader_page->read = 0;
4145 local_set(&cpu_buffer->entries_bytes, 0);
4146 local_set(&cpu_buffer->overrun, 0);
4147 local_set(&cpu_buffer->commit_overrun, 0);
4148 local_set(&cpu_buffer->dropped_events, 0);
4149 local_set(&cpu_buffer->entries, 0);
4150 local_set(&cpu_buffer->committing, 0);
4151 local_set(&cpu_buffer->commits, 0);
4152 cpu_buffer->read = 0;
4153 cpu_buffer->read_bytes = 0;
4155 cpu_buffer->write_stamp = 0;
4156 cpu_buffer->read_stamp = 0;
4158 cpu_buffer->lost_events = 0;
4159 cpu_buffer->last_overrun = 0;
4161 rb_head_page_activate(cpu_buffer);
4165 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4166 * @buffer: The ring buffer to reset a per cpu buffer of
4167 * @cpu: The CPU buffer to be reset
4169 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4171 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4172 unsigned long flags;
4174 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4177 atomic_inc(&buffer->resize_disabled);
4178 atomic_inc(&cpu_buffer->record_disabled);
4180 /* Make sure all commits have finished */
4181 synchronize_sched();
4183 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4185 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4188 arch_spin_lock(&cpu_buffer->lock);
4190 rb_reset_cpu(cpu_buffer);
4192 arch_spin_unlock(&cpu_buffer->lock);
4195 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4197 atomic_dec(&cpu_buffer->record_disabled);
4198 atomic_dec(&buffer->resize_disabled);
4200 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4203 * ring_buffer_reset - reset a ring buffer
4204 * @buffer: The ring buffer to reset all cpu buffers
4206 void ring_buffer_reset(struct ring_buffer *buffer)
4210 for_each_buffer_cpu(buffer, cpu)
4211 	ring_buffer_reset_cpu(buffer, cpu);
4213 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4216 * ring_buffer_empty - is the ring buffer empty?
4217 * @buffer: The ring buffer to test
4219 int ring_buffer_empty(struct ring_buffer *buffer)
4221 struct ring_buffer_per_cpu *cpu_buffer;
4222 unsigned long flags;
4227 dolock = rb_ok_to_lock();
4229 /* yes this is racy, but if you don't like the race, lock the buffer */
4230 for_each_buffer_cpu(buffer, cpu) {
4231 cpu_buffer = buffer->buffers[cpu];
4232 local_irq_save(flags);
4234 raw_spin_lock(&cpu_buffer->reader_lock);
4235 ret = rb_per_cpu_empty(cpu_buffer);
4237 raw_spin_unlock(&cpu_buffer->reader_lock);
4238 local_irq_restore(flags);
4246 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4249 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4250 * @buffer: The ring buffer
4251 * @cpu: The CPU buffer to test
4253 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4255 struct ring_buffer_per_cpu *cpu_buffer;
4256 unsigned long flags;
4260 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4263 dolock = rb_ok_to_lock();
4265 cpu_buffer = buffer->buffers[cpu];
4266 local_irq_save(flags);
4268 raw_spin_lock(&cpu_buffer->reader_lock);
4269 ret = rb_per_cpu_empty(cpu_buffer);
4271 raw_spin_unlock(&cpu_buffer->reader_lock);
4272 local_irq_restore(flags);
4276 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4278 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4280 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4281 * @buffer_a: One buffer to swap with
4282 * @buffer_b: The other buffer to swap with
4284 * This function is useful for tracers that want to take a "snapshot"
4285 * of a CPU buffer and has another back up buffer lying around.
4286 * it is expected that the tracer handles the cpu buffer not being
4287 * used at the moment.
4289 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4290 			 struct ring_buffer *buffer_b, int cpu)
4292 struct ring_buffer_per_cpu *cpu_buffer_a;
4293 struct ring_buffer_per_cpu *cpu_buffer_b;
4296 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4297     !cpumask_test_cpu(cpu, buffer_b->cpumask))
4300 cpu_buffer_a = buffer_a->buffers[cpu];
4301 cpu_buffer_b = buffer_b->buffers[cpu];
4303 /* At least make sure the two buffers are somewhat the same */
4304 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4309 if (ring_buffer_flags != RB_BUFFERS_ON)
4312 if (atomic_read(&buffer_a->record_disabled))
4315 if (atomic_read(&buffer_b->record_disabled))
4318 if (atomic_read(&cpu_buffer_a->record_disabled))
4321 if (atomic_read(&cpu_buffer_b->record_disabled))
4325 * We can't do a synchronize_sched here because this
4326 * function can be called in atomic context.
4327 * Normally this will be called from the same CPU as cpu.
4328 * If not it's up to the caller to protect this.
4330 atomic_inc(&cpu_buffer_a->record_disabled);
4331 atomic_inc(&cpu_buffer_b->record_disabled);
4334 if (local_read(&cpu_buffer_a->committing))
4336 if (local_read(&cpu_buffer_b->committing))
4339 buffer_a->buffers[cpu] = cpu_buffer_b;
4340 buffer_b->buffers[cpu] = cpu_buffer_a;
4342 cpu_buffer_b->buffer = buffer_a;
4343 cpu_buffer_a->buffer = buffer_b;
4348 atomic_dec(&cpu_buffer_a->record_disabled);
4349 atomic_dec(&cpu_buffer_b->record_disabled);
4353 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4354 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
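/*
 * Editor's illustration (not part of the original file): a hypothetical
 * "snapshot" sketch in the spirit of the kernel-doc comment above,
 * assuming the caller has already allocated a spare buffer of the same
 * size and keeps both pointers around.
 */
#include <linux/ring_buffer.h>

static int snapshot_cpu(struct ring_buffer *live, struct ring_buffer *spare,
			int cpu)
{
	/* on success, 'spare' now holds the frozen data for @cpu while
	 * 'live' keeps recording into the page set that used to be spare */
	return ring_buffer_swap_cpu(live, spare, cpu);
}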
4357 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4358 * @buffer: the buffer to allocate for.
4359 * @cpu: the cpu buffer to allocate.
4361 * This function is used in conjunction with ring_buffer_read_page.
4362 * When reading a full page from the ring buffer, these functions
4363 * can be used to speed up the process. The calling function should
4364 * allocate a few pages first with this function. Then when it
4365 * needs to get pages from the ring buffer, it passes the result
4366 * of this function into ring_buffer_read_page, which will swap
4367 * the page that was allocated, with the read page of the buffer.
4370 * The page allocated, or NULL on error.
4372 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4374 struct buffer_data_page *bpage;
4377 page = alloc_pages_node(cpu_to_node(cpu),
4378 			GFP_KERNEL | __GFP_NORETRY, 0);
4382 bpage = page_address(page);
4384 rb_init_page(bpage);
4388 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4391 * ring_buffer_free_read_page - free an allocated read page
4392 * @buffer: the buffer the page was allocated for
4393 * @data: the page to free
4395 * Free a page allocated from ring_buffer_alloc_read_page.
4397 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4399 free_page((unsigned long)data);
4401 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4404 * ring_buffer_read_page - extract a page from the ring buffer
4405 * @buffer: buffer to extract from
4406 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4407 * @len: amount to extract
4408 * @cpu: the cpu of the buffer to extract
4409 * @full: should the extraction only happen when the page is full.
4411 * This function will pull out a page from the ring buffer and consume it.
4412 * @data_page must be the address of the variable that was returned
4413 * from ring_buffer_alloc_read_page. This is because the page might be used
4414 * to swap with a page in the ring buffer.
4417 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
4420 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4422 * process_page(rpage, ret);
4424 * When @full is set, the function will not return true unless
4425 * the writer is off the reader page.
4427 * Note: it is up to the calling functions to handle sleeps and wakeups.
4428 * The ring buffer can be used anywhere in the kernel and can not
4429 * blindly call wake_up. The layer that uses the ring buffer must be
4430 * responsible for that.
4433 * >=0 if data has been transferred, returns the offset of consumed data.
4434 * <0 if no data has been transferred.
4436 int ring_buffer_read_page(struct ring_buffer *buffer,
4437 			  void **data_page, size_t len, int cpu, int full)
4439 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4440 struct ring_buffer_event *event;
4441 struct buffer_data_page *bpage;
4442 struct buffer_page *reader;
4443 unsigned long missed_events;
4444 unsigned long flags;
4445 unsigned int commit;
4450 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4454 * If len is not big enough to hold the page header, then
4455 * we can not copy anything.
4457 if (len <= BUF_PAGE_HDR_SIZE)
4460 len -= BUF_PAGE_HDR_SIZE;
4469 raw_spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
4471 reader
= rb_get_reader_page(cpu_buffer
);
4475 event
= rb_reader_event(cpu_buffer
);
4477 read
= reader
->read
;
4478 commit
= rb_page_commit(reader
);
4480 /* Check if any events were dropped */
4481 missed_events
= cpu_buffer
->lost_events
;
4484 * If this page has been partially read or
4485 * if len is not big enough to read the rest of the page or
4486 * a writer is still on the page, then
4487 * we must copy the data from the page to the buffer.
4488 * Otherwise, we can simply swap the page with the one passed in.
4490 if (read
|| (len
< (commit
- read
)) ||
4491 cpu_buffer
->reader_page
== cpu_buffer
->commit_page
) {
4492 struct buffer_data_page
*rpage
= cpu_buffer
->reader_page
->page
;
4493 unsigned int rpos
= read
;
4494 unsigned int pos
= 0;
4500 if (len
> (commit
- read
))
4501 len
= (commit
- read
);
4503 /* Always keep the time extend and data together */
4504 size
= rb_event_ts_length(event
);
4509 /* save the current timestamp, since the user will need it */
4510 save_timestamp
= cpu_buffer
->read_stamp
;
4512 /* Need to copy one event at a time */
4514 /* We need the size of one event, because
4515 * rb_advance_reader only advances by one event,
4516 * whereas rb_event_ts_length may include the size of
4517 * one or two events.
4518 * We have already ensured there's enough space if this
4519 * is a time extend. */
4520 size
= rb_event_length(event
);
4521 memcpy(bpage
->data
+ pos
, rpage
->data
+ rpos
, size
);
4525 rb_advance_reader(cpu_buffer
);
4526 rpos
= reader
->read
;
4532 event
= rb_reader_event(cpu_buffer
);
4533 /* Always keep the time extend and data together */
4534 size
= rb_event_ts_length(event
);
4535 } while (len
>= size
);
4538 local_set(&bpage
->commit
, pos
);
4539 bpage
->time_stamp
= save_timestamp
;
4541 /* we copied everything to the beginning */
4544 /* update the entry counter */
4545 cpu_buffer
->read
+= rb_page_entries(reader
);
4546 cpu_buffer
->read_bytes
+= BUF_PAGE_SIZE
;
4548 /* swap the pages */
4549 rb_init_page(bpage
);
4550 bpage
= reader
->page
;
4551 reader
->page
= *data_page
;
4552 local_set(&reader
->write
, 0);
4553 local_set(&reader
->entries
, 0);
4558 * Use the real_end for the data size,
4559 * This gives us a chance to store the lost events
4562 if (reader
->real_end
)
4563 local_set(&bpage
->commit
, reader
->real_end
);
4567 cpu_buffer
->lost_events
= 0;
4569 commit
= local_read(&bpage
->commit
);
4571 * Set a flag in the commit field if we lost events
4573 if (missed_events
) {
4574 /* If there is room at the end of the page to save the
4575 * missed events, then record it there.
4577 if (BUF_PAGE_SIZE
- commit
>= sizeof(missed_events
)) {
4578 memcpy(&bpage
->data
[commit
], &missed_events
,
4579 sizeof(missed_events
));
4580 local_add(RB_MISSED_STORED
, &bpage
->commit
);
4581 commit
+= sizeof(missed_events
);
4583 local_add(RB_MISSED_EVENTS
, &bpage
->commit
);
4587 * This page may be off to user land. Zero it out here.
4589 if (commit
< BUF_PAGE_SIZE
)
4590 memset(&bpage
->data
[commit
], 0, BUF_PAGE_SIZE
- commit
);
4593 raw_spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
4598 EXPORT_SYMBOL_GPL(ring_buffer_read_page
);
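/*
 * Usage sketch (a hypothetical caller, not part of this file), following
 * the return convention documented above: >= 0 is the offset of the
 * consumed data within the returned page, < 0 means nothing was
 * transferred. Sleeping and waking between attempts is left to the
 * caller. The helper names drain_cpu_pages() and process_page() are
 * illustrative only:
 *
 *	static int drain_cpu_pages(struct ring_buffer *buffer, int cpu)
 *	{
 *		void *rpage;
 *		int ret;
 *
 *		rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *		if (!rpage)
 *			return -ENOMEM;
 *
 *		for (;;) {
 *			ret = ring_buffer_read_page(buffer, &rpage,
 *						    PAGE_SIZE, cpu, 1);
 *			if (ret < 0)
 *				break;
 *			process_page(rpage, ret);
 *		}
 *
 *		ring_buffer_free_read_page(buffer, rpage);
 *		return 0;
 *	}
 */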
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;
	int cpu_i, nr_pages_same;
	unsigned int nr_pages;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		nr_pages = 0;
		nr_pages_same = 1;
		/* check if all cpu sizes are same */
		for_each_buffer_cpu(buffer, cpu_i) {
			/* fill in the size from first enabled cpu */
			if (nr_pages == 0)
				nr_pages = buffer->buffers[cpu_i]->nr_pages;
			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
				nr_pages_same = 0;
				break;
			}
		}
		/* allocate minimum pages, user can later expand it */
		if (!nr_pages_same)
			nr_pages = 2;
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
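/*
 * A note on running this test (an assumption about the usual workflow,
 * not enforced by this file beyond the #ifdef above): building with
 *
 *	CONFIG_RING_BUFFER_STARTUP_TEST=y
 *
 * causes test_ringbuffer() below to run as a late_initcall, hammer the
 * buffers for roughly ten seconds, and then print per-CPU statistics
 * followed by either "Ring buffer PASSED!" or a warning.
 */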
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
	struct ring_buffer	*buffer;
	unsigned long		events;
	unsigned long		bytes_written;
	unsigned long		bytes_alloc;
	unsigned long		bytes_dropped;
	unsigned long		events_nested;
	unsigned long		bytes_written_nested;
	unsigned long		bytes_alloc_nested;
	unsigned long		bytes_dropped_nested;
	int			min_size_nested;
	int			max_size_nested;
	int			min_size;
	int			max_size;
	int			cnt;
	int			cpu;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
	int size;
	char str[];
};
static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
	struct ring_buffer_event *event;
	struct rb_item *item;
	bool started;
	int event_len;
	int size;
	int len;
	int cnt;

	/* Have nested writes different than what is written */
	cnt = data->cnt + (nested ? 27 : 0);

	/* Multiply cnt by ~e, to make some unique increment */
	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

	len = size + sizeof(struct rb_item);

	started = rb_test_started;
	/* read rb_test_started before checking buffer enabled */
	smp_rmb();

	event = ring_buffer_lock_reserve(data->buffer, len);
	if (!event) {
		/* Ignore dropped events before test starts. */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

 out:
	ring_buffer_unlock_commit(data->buffer, event);

	return 0;
}
static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}
static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {
		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non preempt, let others run */
		schedule();
	}

	return 0;
}
static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct ring_buffer *buffer;
	int cpu;
	int ret = 0;

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
						 "rbtester/%d", cpu);
		if (WARN_ON(!rb_threads[cpu])) {
			pr_cont("FAILED\n");
			ret = -1;
			goto out_free;
		}

		kthread_bind(rb_threads[cpu], cpu);
		wake_up_process(rb_threads[cpu]);
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(!rb_hammer)) {
		pr_cont("FAILED\n");
		ret = -1;
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show buffer is enabled before setting rb_test_started.
	 * Yes there's a small race window where events could be
	 * dropped and the thread won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it. Thus, we don't care about
	 * those dropped events. We care about events dropped after
	 * the threads see that the buffer is active.
	 */
	smp_wmb();
	rb_test_started = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* Just run for 10 seconds */
	schedule_timeout(10 * HZ);

	kthread_stop(rb_hammer);

 out_free:
	for_each_online_cpu(cpu) {
		if (!rb_threads[cpu])
			break;
		kthread_stop(rb_threads[cpu]);
	}
	if (ret) {
		ring_buffer_free(buffer);
		return ret;
	}

	pr_info("finished\n");
	for_each_online_cpu(cpu) {
		struct ring_buffer_event *event;
		struct rb_test_data *data = &rb_data[cpu];
		struct rb_item *item;
		unsigned long total_events;
		unsigned long total_dropped;
		unsigned long total_written;
		unsigned long total_alloc;
		unsigned long total_read = 0;
		unsigned long total_size = 0;
		unsigned long total_len = 0;
		unsigned long total_lost = 0;
		unsigned long lost;
		int big_event_size;
		int small_event_size;

		ret = -1;

		total_events = data->events + data->events_nested;
		total_written = data->bytes_written + data->bytes_written_nested;
		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

		big_event_size = data->max_size + data->max_size_nested;
		small_event_size = data->min_size + data->min_size_nested;

		pr_info("CPU %d:\n", cpu);
		pr_info(" events: %ld\n", total_events);
		pr_info(" dropped bytes: %ld\n", total_dropped);
		pr_info(" alloced bytes: %ld\n", total_alloc);
		pr_info(" written bytes: %ld\n", total_written);
		pr_info(" biggest event: %d\n", big_event_size);
		pr_info(" smallest event: %d\n", small_event_size);

		if (RB_WARN_ON(buffer, total_dropped))
			break;

		ret = 0;

		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			total_lost += lost;
			item = ring_buffer_event_data(event);
			total_len += ring_buffer_event_length(event);
			total_size += item->size + sizeof(struct rb_item);
			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
				pr_info("FAILED!\n");
				pr_info("buffer had: %.*s\n", item->size, item->str);
				pr_info("expected:   %.*s\n", item->size, rb_string);
				RB_WARN_ON(buffer, 1);
				ret = -1;
				break;
			}
			total_read++;
		}
		if (ret)
			break;

		ret = -1;

		pr_info(" read events: %ld\n", total_read);
		pr_info(" lost events: %ld\n", total_lost);
		pr_info(" total events: %ld\n", total_lost + total_read);
		pr_info(" recorded len bytes: %ld\n", total_len);
		pr_info(" recorded size bytes: %ld\n", total_size);
		if (total_lost)
			pr_info(" With dropped events, record len and size may not match\n"
				" alloced and written from above\n");
		if (!total_lost) {
			if (RB_WARN_ON(buffer, total_len != total_alloc ||
				       total_size != total_written))
				break;
		}
		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
			break;

		ret = 0;
	}
	if (!ret)
		pr_info("Ring buffer PASSED!\n");

	ring_buffer_free(buffer);
	return 0;
}

late_initcall(test_ringbuffer);

#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */