kernel/trace/ring_buffer.c
1 /*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/kmemcheck.h>
14 #include <linux/module.h>
15 #include <linux/percpu.h>
16 #include <linux/mutex.h>
17 #include <linux/init.h>
18 #include <linux/hash.h>
19 #include <linux/list.h>
20 #include <linux/cpu.h>
21 #include <linux/fs.h>
22
23 #include "trace.h"
24
25 /*
26 * The ring buffer header is special. We must keep it up to date manually.
27 */
28 int ring_buffer_print_entry_header(struct trace_seq *s)
29 {
30 int ret;
31
32 ret = trace_seq_printf(s, "# compressed entry header\n");
33 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
34 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
35 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
36 ret = trace_seq_printf(s, "\n");
37 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
38 RINGBUF_TYPE_PADDING);
39 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
40 RINGBUF_TYPE_TIME_EXTEND);
41 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
42 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
43
44 return ret;
45 }
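/*
 * Worked example of the encoding above: a 12-byte data payload is stored
 * with type_len = 3 (3 * RB_ALIGNMENT = 12 bytes) and the payload begins
 * at array[0].  A 200-byte payload does not fit in the 5-bit type_len, so
 * type_len is 0, array[0] holds the length of everything that follows the
 * 32-bit header, and the payload begins at array[1] (see
 * rb_event_data_length() and rb_event_data() below).
 */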
46
47 /*
48 * The ring buffer is made up of a list of pages. A separate list of pages is
49 * allocated for each CPU. A writer may only write to a buffer that is
50 * associated with the CPU it is currently executing on. A reader may read
51 * from any per cpu buffer.
52 *
53 * The reader is special. For each per cpu buffer, the reader has its own
54 * reader page. When a reader has read the entire reader page, this reader
55 * page is swapped with another page in the ring buffer.
56 *
57 * Now, as long as the writer is off the reader page, the reader can do
58 * whatever it wants with that page. The writer will never write to that page
59 * again (as long as it is out of the ring buffer).
60 *
61 * Here's some silly ASCII art.
62 *
63 * +------+
64 * |reader| RING BUFFER
65 * |page |
66 * +------+ +---+ +---+ +---+
67 * | |-->| |-->| |
68 * +---+ +---+ +---+
69 * ^ |
70 * | |
71 * +---------------+
72 *
73 *
74 * +------+
75 * |reader| RING BUFFER
76 * |page |------------------v
77 * +------+ +---+ +---+ +---+
78 * | |-->| |-->| |
79 * +---+ +---+ +---+
80 * ^ |
81 * | |
82 * +---------------+
83 *
84 *
85 * +------+
86 * |reader| RING BUFFER
87 * |page |------------------v
88 * +------+ +---+ +---+ +---+
89 * ^ | |-->| |-->| |
90 * | +---+ +---+ +---+
91 * | |
92 * | |
93 * +------------------------------+
94 *
95 *
96 * +------+
97 * |buffer| RING BUFFER
98 * |page |------------------v
99 * +------+ +---+ +---+ +---+
100 * ^ | | | |-->| |
101 * | New +---+ +---+ +---+
102 * | Reader------^ |
103 * | page |
104 * +------------------------------+
105 *
106 *
107 * After we make this swap, the reader can hand this page off to the splice
108 * code and be done with it. It can even allocate a new page if it needs to
109 * and swap that into the ring buffer.
110 *
111 * We will be using cmpxchg soon to make all this lockless.
112 *
113 */
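/*
 * For example, the splice path allocates a spare page with
 * ring_buffer_alloc_read_page() and hands it to ring_buffer_read_page(),
 * which can perform the swap drawn above and hand back a whole page of
 * events without ever stopping the writer (both are defined later in
 * this file).
 */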
114
115 /*
116 * A fast way to enable or disable all ring buffers is to
117 * call tracing_on or tracing_off. Turning off the ring buffers
118 * prevents all ring buffers from being recorded to.
119 * Turning this switch on makes it OK to write to the
120 * ring buffer, if the ring buffer itself is enabled.
121 *
122 * There are three layers that must be on in order to write
123 * to the ring buffer.
124 *
125 * 1) This global flag must be set.
126 * 2) The ring buffer must be enabled for recording.
127 * 3) The per cpu buffer must be enabled for recording.
128 *
129 * In case of an anomaly, this global flag has a bit set that
130 * will permanently disable all ring buffers.
131 */
132
133 /*
134 * Global flag to disable all recording to ring buffers
135 * This has two bits: ON, DISABLED
136 *
137 * ON DISABLED
138 * ---- ----------
139 * 0 0 : ring buffers are off
140 * 1 0 : ring buffers are on
141 * X 1 : ring buffers are permanently disabled
142 */
143
144 enum {
145 RB_BUFFERS_ON_BIT = 0,
146 RB_BUFFERS_DISABLED_BIT = 1,
147 };
148
149 enum {
150 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
151 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
152 };
153
154 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
155
156 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
157
158 /**
159 * tracing_on - enable all tracing buffers
160 *
161 * This function enables all tracing buffers that may have been
162 * disabled with tracing_off.
163 */
164 void tracing_on(void)
165 {
166 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
167 }
168 EXPORT_SYMBOL_GPL(tracing_on);
169
170 /**
171 * tracing_off - turn off all tracing buffers
172 *
173 * This function stops all tracing buffers from recording data.
174 * It does not disable any overhead the tracers themselves may
175 * be causing. This function simply causes all recording to
176 * the ring buffers to fail.
177 */
178 void tracing_off(void)
179 {
180 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181 }
182 EXPORT_SYMBOL_GPL(tracing_off);
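/*
 * For example, a debugging hook may call tracing_off() in an error path
 * to freeze every ring buffer so the events leading up to the failure
 * are preserved, and tracing_on() later to re-arm them.  These toggle
 * only the global layer described above; the per-buffer and per-cpu
 * layers are driven by the record_disabled counters (see
 * ring_buffer_record_disable() and friends later in this file).
 */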
183
184 /**
185 * tracing_off_permanent - permanently disable ring buffers
186 *
187 * This function, once called, will disable all ring buffers
188 * permanently.
189 */
190 void tracing_off_permanent(void)
191 {
192 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
193 }
194
195 /**
196 * tracing_is_on - show state of ring buffers enabled
197 */
198 int tracing_is_on(void)
199 {
200 return ring_buffer_flags == RB_BUFFERS_ON;
201 }
202 EXPORT_SYMBOL_GPL(tracing_is_on);
203
204 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
205 #define RB_ALIGNMENT 4U
206 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
207 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
208
209 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
211
212 enum {
213 RB_LEN_TIME_EXTEND = 8,
214 RB_LEN_TIME_STAMP = 16,
215 };
216
217 static inline int rb_null_event(struct ring_buffer_event *event)
218 {
219 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
220 }
221
222 static void rb_event_set_padding(struct ring_buffer_event *event)
223 {
224 /* padding has a NULL time_delta */
225 event->type_len = RINGBUF_TYPE_PADDING;
226 event->time_delta = 0;
227 }
228
229 static unsigned
230 rb_event_data_length(struct ring_buffer_event *event)
231 {
232 unsigned length;
233
234 if (event->type_len)
235 length = event->type_len * RB_ALIGNMENT;
236 else
237 length = event->array[0];
238 return length + RB_EVNT_HDR_SIZE;
239 }
240
241 /* inline for ring buffer fast paths */
242 static unsigned
243 rb_event_length(struct ring_buffer_event *event)
244 {
245 switch (event->type_len) {
246 case RINGBUF_TYPE_PADDING:
247 if (rb_null_event(event))
248 /* undefined */
249 return -1;
250 return event->array[0] + RB_EVNT_HDR_SIZE;
251
252 case RINGBUF_TYPE_TIME_EXTEND:
253 return RB_LEN_TIME_EXTEND;
254
255 case RINGBUF_TYPE_TIME_STAMP:
256 return RB_LEN_TIME_STAMP;
257
258 case RINGBUF_TYPE_DATA:
259 return rb_event_data_length(event);
260 default:
261 BUG();
262 }
263 /* not hit */
264 return 0;
265 }
266
267 /**
268 * ring_buffer_event_length - return the length of the event
269 * @event: the event to get the length of
270 */
271 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
272 {
273 unsigned length = rb_event_length(event);
274 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
275 return length;
276 length -= RB_EVNT_HDR_SIZE;
277 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
278 length -= sizeof(event->array[0]);
279 return length;
280 }
281 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
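/*
 * Worked example: a 100-byte payload is reserved as a small event with
 * type_len = DIV_ROUND_UP(100, 4) = 25, so rb_event_length() returns
 * 25 * RB_ALIGNMENT + RB_EVNT_HDR_SIZE = 104 while
 * ring_buffer_event_length() reports the 100 bytes of payload.  A
 * 200-byte payload is stored with type_len = 0 and array[0] = 204
 * (payload plus the length word), so rb_event_length() returns 208 and
 * ring_buffer_event_length() still reports 200.
 */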
282
283 /* inline for ring buffer fast paths */
284 static void *
285 rb_event_data(struct ring_buffer_event *event)
286 {
287 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
288 /* If length is in len field, then array[0] has the data */
289 if (event->type_len)
290 return (void *)&event->array[0];
291 /* Otherwise length is in array[0] and array[1] has the data */
292 return (void *)&event->array[1];
293 }
294
295 /**
296 * ring_buffer_event_data - return the data of the event
297 * @event: the event to get the data from
298 */
299 void *ring_buffer_event_data(struct ring_buffer_event *event)
300 {
301 return rb_event_data(event);
302 }
303 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
304
305 #define for_each_buffer_cpu(buffer, cpu) \
306 for_each_cpu(cpu, buffer->cpumask)
307
308 #define TS_SHIFT 27
309 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
310 #define TS_DELTA_TEST (~TS_MASK)
311
312 struct buffer_data_page {
313 u64 time_stamp; /* page time stamp */
314 local_t commit; /* write committed index */
315 unsigned char data[]; /* data of buffer page */
316 };
317
318 /*
319 * Note, the buffer_page list must be first. The buffer pages
320 * are allocated in cache lines, which means that each buffer
321 * page will be at the beginning of a cache line, and thus
322 * the least significant bits will be zero. We use this to
323 * add flags in the list struct pointers, to make the ring buffer
324 * lockless.
325 */
326 struct buffer_page {
327 struct list_head list; /* list of buffer pages */
328 local_t write; /* index for next write */
329 unsigned read; /* index for next read */
330 local_t entries; /* entries on this page */
331 struct buffer_data_page *page; /* Actual data page */
332 };
333
334 /*
335 * The buffer page counters, write and entries, must be reset
336 * atomically when crossing page boundaries. To synchronize this
337 * update, two counters are inserted into the number. One is
338 * the actual counter for the write position or count on the page.
339 *
340 * The other is a counter of updaters. Before an update happens
341 * the update partition of the counter is incremented. This will
342 * allow the updater to update the counter atomically.
343 *
344 * The counter is 20 bits, and the state data is 12.
345 */
346 #define RB_WRITE_MASK 0xfffff
347 #define RB_WRITE_INTCNT (1 << 20)
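/*
 * Example: if next_page->write reads 0x00300ffc, the low 20 bits (0xffc)
 * are the byte index written on that page and the upper bits show that
 * updaters have bumped the counter three times, each by adding
 * RB_WRITE_INTCNT via local_add_return() in rb_tail_page_update() below.
 */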
348
349 static void rb_init_page(struct buffer_data_page *bpage)
350 {
351 local_set(&bpage->commit, 0);
352 }
353
354 /**
355 * ring_buffer_page_len - the size of data on the page.
356 * @page: The page to read
357 *
358 * Returns the amount of data on the page, including buffer page header.
359 */
360 size_t ring_buffer_page_len(void *page)
361 {
362 return local_read(&((struct buffer_data_page *)page)->commit)
363 + BUF_PAGE_HDR_SIZE;
364 }
365
366 /*
367 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
368 * this issue out.
369 */
370 static void free_buffer_page(struct buffer_page *bpage)
371 {
372 free_page((unsigned long)bpage->page);
373 kfree(bpage);
374 }
375
376 /*
377 * We need to fit the time_stamp delta into 27 bits.
378 */
379 static inline int test_time_stamp(u64 delta)
380 {
381 if (delta & TS_DELTA_TEST)
382 return 1;
383 return 0;
384 }
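/*
 * Example: with a nanosecond resolution clock, deltas of up to
 * 2^27 - 1 ns (roughly 134 ms) between events fit in the 27-bit
 * time_delta field of the header; anything larger makes
 * test_time_stamp() return 1 and forces a separate
 * RINGBUF_TYPE_TIME_EXTEND event (see rb_add_time_stamp() below).
 */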
385
386 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
387
388 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
389 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
390
391 /* Max number of timestamps that can fit on a page */
392 #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
393
394 int ring_buffer_print_page_header(struct trace_seq *s)
395 {
396 struct buffer_data_page field;
397 int ret;
398
399 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
400 "offset:0;\tsize:%u;\tsigned:%u;\n",
401 (unsigned int)sizeof(field.time_stamp),
402 (unsigned int)is_signed_type(u64));
403
404 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
405 "offset:%u;\tsize:%u;\tsigned:%u;\n",
406 (unsigned int)offsetof(typeof(field), commit),
407 (unsigned int)sizeof(field.commit),
408 (unsigned int)is_signed_type(long));
409
410 ret = trace_seq_printf(s, "\tfield: char data;\t"
411 "offset:%u;\tsize:%u;\tsigned:%u;\n",
412 (unsigned int)offsetof(typeof(field), data),
413 (unsigned int)BUF_PAGE_SIZE,
414 (unsigned int)is_signed_type(char));
415
416 return ret;
417 }
418
419 /*
420 * If head_page == tail_page && head == tail, then the buffer is empty.
421 */
422 struct ring_buffer_per_cpu {
423 int cpu;
424 struct ring_buffer *buffer;
425 spinlock_t reader_lock; /* serialize readers */
426 arch_spinlock_t lock;
427 struct lock_class_key lock_key;
428 struct list_head *pages;
429 struct buffer_page *head_page; /* read from head */
430 struct buffer_page *tail_page; /* write to tail */
431 struct buffer_page *commit_page; /* committed pages */
432 struct buffer_page *reader_page;
433 local_t commit_overrun;
434 local_t overrun;
435 local_t entries;
436 local_t committing;
437 local_t commits;
438 unsigned long read;
439 u64 write_stamp;
440 u64 read_stamp;
441 atomic_t record_disabled;
442 };
443
444 struct ring_buffer {
445 unsigned pages;
446 unsigned flags;
447 int cpus;
448 atomic_t record_disabled;
449 cpumask_var_t cpumask;
450
451 struct lock_class_key *reader_lock_key;
452
453 struct mutex mutex;
454
455 struct ring_buffer_per_cpu **buffers;
456
457 #ifdef CONFIG_HOTPLUG_CPU
458 struct notifier_block cpu_notify;
459 #endif
460 u64 (*clock)(void);
461 };
462
463 struct ring_buffer_iter {
464 struct ring_buffer_per_cpu *cpu_buffer;
465 unsigned long head;
466 struct buffer_page *head_page;
467 struct buffer_page *cache_reader_page;
468 unsigned long cache_read;
469 u64 read_stamp;
470 };
471
472 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
473 #define RB_WARN_ON(b, cond) \
474 ({ \
475 int _____ret = unlikely(cond); \
476 if (_____ret) { \
477 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
478 struct ring_buffer_per_cpu *__b = \
479 (void *)b; \
480 atomic_inc(&__b->buffer->record_disabled); \
481 } else \
482 atomic_inc(&b->record_disabled); \
483 WARN_ON(1); \
484 } \
485 _____ret; \
486 })
487
488 /* Up this if you want to test the TIME_EXTENTS and normalization */
489 #define DEBUG_SHIFT 0
490
491 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
492 {
493 /* shift to debug/test normalization and TIME_EXTENTS */
494 return buffer->clock() << DEBUG_SHIFT;
495 }
496
497 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
498 {
499 u64 time;
500
501 preempt_disable_notrace();
502 time = rb_time_stamp(buffer);
503 preempt_enable_no_resched_notrace();
504
505 return time;
506 }
507 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
508
509 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
510 int cpu, u64 *ts)
511 {
512 /* Just stupid testing the normalize function and deltas */
513 *ts >>= DEBUG_SHIFT;
514 }
515 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
516
517 /*
518 * Making the ring buffer lockless makes things tricky.
519 * Writes only happen on the CPU that they are on, and they only
520 * need to worry about interrupts. Reads, however, can
521 * happen on any CPU.
522 *
523 * The reader page is always off the ring buffer, but when the
524 * reader finishes with a page, it needs to swap its page with
525 * a new one from the buffer. The reader needs to take from
526 * the head (writes go to the tail). But if a writer is in overwrite
527 * mode and wraps, it must push the head page forward.
528 *
529 * Here lies the problem.
530 *
531 * The reader must be careful to replace only the head page, and
532 * not another one. As described at the top of the file in the
533 * ASCII art, the reader sets its old page to point to the next
534 * page after head. It then sets the page after head to point to
535 * the old reader page. But if the writer moves the head page
536 * during this operation, the reader could end up with the tail.
537 *
538 * We use cmpxchg to help prevent this race. We also do something
539 * special with the page before head. We set the LSB to 1.
540 *
541 * When the writer must push the page forward, it will clear the
542 * bit that points to the head page, move the head, and then set
543 * the bit that points to the new head page.
544 *
545 * We also don't want an interrupt coming in and moving the head
546 * page on another writer. Thus we use the second LSB to catch
547 * that too. Thus:
548 *
549 * head->list->prev->next bit 1 bit 0
550 * ------- -------
551 * Normal page 0 0
552 * Points to head page 0 1
553 * New head page 1 0
554 *
555 * Note we can not trust the prev pointer of the head page, because:
556 *
557 * +----+ +-----+ +-----+
558 * | |------>| T |---X--->| N |
559 * | |<------| | | |
560 * +----+ +-----+ +-----+
561 * ^ ^ |
562 * | +-----+ | |
563 * +----------| R |----------+ |
564 * | |<-----------+
565 * +-----+
566 *
567 * Key: ---X--> HEAD flag set in pointer
568 * T Tail page
569 * R Reader page
570 * N Next page
571 *
572 * (see __rb_reserve_next() to see where this happens)
573 *
574 * What the above shows is that the reader just swapped out
575 * the reader page with a page in the buffer, but before it
576 * could make the new header point back to the new page added,
577 * it was preempted by a writer. The writer moved forward onto
578 * the new page added by the reader and is about to move forward
579 * again.
580 *
581 * You can see, it is legitimate for the previous pointer of
582 * the head (or any page) not to point back to itself. But only
583 * temporarily.
584 */
585
586 #define RB_PAGE_NORMAL 0UL
587 #define RB_PAGE_HEAD 1UL
588 #define RB_PAGE_UPDATE 2UL
589
590
591 #define RB_FLAG_MASK 3UL
592
593 /* PAGE_MOVED is not part of the mask */
594 #define RB_PAGE_MOVED 4UL
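/*
 * Example: with buffer pages cache-line aligned, a real next pointer of
 * 0xffff880012345b40 is stored as 0xffff880012345b41 while it points at
 * the head page (RB_PAGE_HEAD) and as 0xffff880012345b42 while a writer
 * is moving the head (RB_PAGE_UPDATE).  rb_list_head() below masks off
 * those low bits to recover the real pointer; RB_PAGE_MOVED is only ever
 * used as a return value and is never stored in a pointer.
 */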
595
596 /*
597 * rb_list_head - strip the flag bits from a list pointer
598 */
599 static struct list_head *rb_list_head(struct list_head *list)
600 {
601 unsigned long val = (unsigned long)list;
602
603 return (struct list_head *)(val & ~RB_FLAG_MASK);
604 }
605
606 /*
607 * rb_is_head_page - test if the given page is the head page
608 *
609 * Because the reader may move the head_page pointer, we can
610 * not trust what the head page is (it may be pointing to
611 * the reader page). But if the next page is a header page,
612 * its flags will be non zero.
613 */
614 static inline int
615 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
616 struct buffer_page *page, struct list_head *list)
617 {
618 unsigned long val;
619
620 val = (unsigned long)list->next;
621
622 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
623 return RB_PAGE_MOVED;
624
625 return val & RB_FLAG_MASK;
626 }
627
628 /*
629 * rb_is_reader_page
630 *
631 * The unique thing about the reader page is that, if the
632 * writer is ever on it, the previous pointer never points
633 * back to the reader page.
634 */
635 static int rb_is_reader_page(struct buffer_page *page)
636 {
637 struct list_head *list = page->list.prev;
638
639 return rb_list_head(list->next) != &page->list;
640 }
641
642 /*
643 * rb_set_list_to_head - set a list_head to be pointing to head.
644 */
645 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
646 struct list_head *list)
647 {
648 unsigned long *ptr;
649
650 ptr = (unsigned long *)&list->next;
651 *ptr |= RB_PAGE_HEAD;
652 *ptr &= ~RB_PAGE_UPDATE;
653 }
654
655 /*
656 * rb_head_page_activate - sets up head page
657 */
658 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
659 {
660 struct buffer_page *head;
661
662 head = cpu_buffer->head_page;
663 if (!head)
664 return;
665
666 /*
667 * Set the previous list pointer to have the HEAD flag.
668 */
669 rb_set_list_to_head(cpu_buffer, head->list.prev);
670 }
671
672 static void rb_list_head_clear(struct list_head *list)
673 {
674 unsigned long *ptr = (unsigned long *)&list->next;
675
676 *ptr &= ~RB_FLAG_MASK;
677 }
678
679 /*
680 * rb_head_page_deactivate - clears head page ptr (for free list)
681 */
682 static void
683 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
684 {
685 struct list_head *hd;
686
687 /* Go through the whole list and clear any pointers found. */
688 rb_list_head_clear(cpu_buffer->pages);
689
690 list_for_each(hd, cpu_buffer->pages)
691 rb_list_head_clear(hd);
692 }
693
694 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
695 struct buffer_page *head,
696 struct buffer_page *prev,
697 int old_flag, int new_flag)
698 {
699 struct list_head *list;
700 unsigned long val = (unsigned long)&head->list;
701 unsigned long ret;
702
703 list = &prev->list;
704
705 val &= ~RB_FLAG_MASK;
706
707 ret = cmpxchg((unsigned long *)&list->next,
708 val | old_flag, val | new_flag);
709
710 /* check if the reader took the page */
711 if ((ret & ~RB_FLAG_MASK) != val)
712 return RB_PAGE_MOVED;
713
714 return ret & RB_FLAG_MASK;
715 }
716
717 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
718 struct buffer_page *head,
719 struct buffer_page *prev,
720 int old_flag)
721 {
722 return rb_head_page_set(cpu_buffer, head, prev,
723 old_flag, RB_PAGE_UPDATE);
724 }
725
726 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
727 struct buffer_page *head,
728 struct buffer_page *prev,
729 int old_flag)
730 {
731 return rb_head_page_set(cpu_buffer, head, prev,
732 old_flag, RB_PAGE_HEAD);
733 }
734
735 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
736 struct buffer_page *head,
737 struct buffer_page *prev,
738 int old_flag)
739 {
740 return rb_head_page_set(cpu_buffer, head, prev,
741 old_flag, RB_PAGE_NORMAL);
742 }
743
744 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
745 struct buffer_page **bpage)
746 {
747 struct list_head *p = rb_list_head((*bpage)->list.next);
748
749 *bpage = list_entry(p, struct buffer_page, list);
750 }
751
752 static struct buffer_page *
753 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
754 {
755 struct buffer_page *head;
756 struct buffer_page *page;
757 struct list_head *list;
758 int i;
759
760 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
761 return NULL;
762
763 /* sanity check */
764 list = cpu_buffer->pages;
765 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
766 return NULL;
767
768 page = head = cpu_buffer->head_page;
769 /*
770 * It is possible that the writer moves the head page behind
771 * where we started, and we miss it in one loop.
772 * A second loop should grab the head page, but we'll do
773 * three loops just because I'm paranoid.
774 */
775 for (i = 0; i < 3; i++) {
776 do {
777 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
778 cpu_buffer->head_page = page;
779 return page;
780 }
781 rb_inc_page(cpu_buffer, &page);
782 } while (page != head);
783 }
784
785 RB_WARN_ON(cpu_buffer, 1);
786
787 return NULL;
788 }
789
790 static int rb_head_page_replace(struct buffer_page *old,
791 struct buffer_page *new)
792 {
793 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
794 unsigned long val;
795 unsigned long ret;
796
797 val = *ptr & ~RB_FLAG_MASK;
798 val |= RB_PAGE_HEAD;
799
800 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
801
802 return ret == val;
803 }
804
805 /*
806 * rb_tail_page_update - move the tail page forward
807 *
808 * Returns 1 if moved tail page, 0 if someone else did.
809 */
810 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
811 struct buffer_page *tail_page,
812 struct buffer_page *next_page)
813 {
814 struct buffer_page *old_tail;
815 unsigned long old_entries;
816 unsigned long old_write;
817 int ret = 0;
818
819 /*
820 * The tail page now needs to be moved forward.
821 *
822 * We need to reset the tail page, but without messing
823 * with possible erasing of data brought in by interrupts
824 * that have moved the tail page and are currently on it.
825 *
826 * We add a counter to the write field to denote this.
827 */
828 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
829 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
830
831 /*
832 * Just make sure we have seen our old_write and synchronize
833 * with any interrupts that come in.
834 */
835 barrier();
836
837 /*
838 * If the tail page is still the same as what we think
839 * it is, then it is up to us to update the tail
840 * pointer.
841 */
842 if (tail_page == cpu_buffer->tail_page) {
843 /* Zero the write counter */
844 unsigned long val = old_write & ~RB_WRITE_MASK;
845 unsigned long eval = old_entries & ~RB_WRITE_MASK;
846
847 /*
848 * This will only succeed if an interrupt did
849 * not come in and change it. In which case, we
850 * do not want to modify it.
851 *
852 * We add (void) to let the compiler know that we do not care
853 * about the return value of these functions. We use the
854 * cmpxchg to only update if an interrupt did not already
855 * do it for us. If the cmpxchg fails, we don't care.
856 */
857 (void)local_cmpxchg(&next_page->write, old_write, val);
858 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
859
860 /*
861 * No need to worry about races with clearing out the commit;
862 * it can only increment when a commit takes place. But that
863 * only happens in the outermost nested commit.
864 */
865 local_set(&next_page->page->commit, 0);
866
867 old_tail = cmpxchg(&cpu_buffer->tail_page,
868 tail_page, next_page);
869
870 if (old_tail == tail_page)
871 ret = 1;
872 }
873
874 return ret;
875 }
876
877 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
878 struct buffer_page *bpage)
879 {
880 unsigned long val = (unsigned long)bpage;
881
882 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
883 return 1;
884
885 return 0;
886 }
887
888 /**
889 * rb_check_list - make sure a pointer to a list has the last bits zero
890 */
891 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
892 struct list_head *list)
893 {
894 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
895 return 1;
896 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
897 return 1;
898 return 0;
899 }
900
901 /**
902 * rb_check_pages - integrity check of buffer pages
903 * @cpu_buffer: CPU buffer with pages to test
904 *
905 * As a safety measure we check to make sure the data pages have not
906 * been corrupted.
907 */
908 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
909 {
910 struct list_head *head = cpu_buffer->pages;
911 struct buffer_page *bpage, *tmp;
912
913 rb_head_page_deactivate(cpu_buffer);
914
915 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
916 return -1;
917 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
918 return -1;
919
920 if (rb_check_list(cpu_buffer, head))
921 return -1;
922
923 list_for_each_entry_safe(bpage, tmp, head, list) {
924 if (RB_WARN_ON(cpu_buffer,
925 bpage->list.next->prev != &bpage->list))
926 return -1;
927 if (RB_WARN_ON(cpu_buffer,
928 bpage->list.prev->next != &bpage->list))
929 return -1;
930 if (rb_check_list(cpu_buffer, &bpage->list))
931 return -1;
932 }
933
934 rb_head_page_activate(cpu_buffer);
935
936 return 0;
937 }
938
939 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
940 unsigned nr_pages)
941 {
942 struct buffer_page *bpage, *tmp;
943 unsigned long addr;
944 LIST_HEAD(pages);
945 unsigned i;
946
947 WARN_ON(!nr_pages);
948
949 for (i = 0; i < nr_pages; i++) {
950 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
951 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
952 if (!bpage)
953 goto free_pages;
954
955 rb_check_bpage(cpu_buffer, bpage);
956
957 list_add(&bpage->list, &pages);
958
959 addr = __get_free_page(GFP_KERNEL);
960 if (!addr)
961 goto free_pages;
962 bpage->page = (void *)addr;
963 rb_init_page(bpage->page);
964 }
965
966 /*
967 * The ring buffer page list is a circular list that does not
968 * start and end with a list head. All page list items point to
969 * other pages.
970 */
971 cpu_buffer->pages = pages.next;
972 list_del(&pages);
973
974 rb_check_pages(cpu_buffer);
975
976 return 0;
977
978 free_pages:
979 list_for_each_entry_safe(bpage, tmp, &pages, list) {
980 list_del_init(&bpage->list);
981 free_buffer_page(bpage);
982 }
983 return -ENOMEM;
984 }
985
986 static struct ring_buffer_per_cpu *
987 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
988 {
989 struct ring_buffer_per_cpu *cpu_buffer;
990 struct buffer_page *bpage;
991 unsigned long addr;
992 int ret;
993
994 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
995 GFP_KERNEL, cpu_to_node(cpu));
996 if (!cpu_buffer)
997 return NULL;
998
999 cpu_buffer->cpu = cpu;
1000 cpu_buffer->buffer = buffer;
1001 spin_lock_init(&cpu_buffer->reader_lock);
1002 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1003 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1004
1005 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1006 GFP_KERNEL, cpu_to_node(cpu));
1007 if (!bpage)
1008 goto fail_free_buffer;
1009
1010 rb_check_bpage(cpu_buffer, bpage);
1011
1012 cpu_buffer->reader_page = bpage;
1013 addr = __get_free_page(GFP_KERNEL);
1014 if (!addr)
1015 goto fail_free_reader;
1016 bpage->page = (void *)addr;
1017 rb_init_page(bpage->page);
1018
1019 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1020
1021 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1022 if (ret < 0)
1023 goto fail_free_reader;
1024
1025 cpu_buffer->head_page
1026 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1027 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1028
1029 rb_head_page_activate(cpu_buffer);
1030
1031 return cpu_buffer;
1032
1033 fail_free_reader:
1034 free_buffer_page(cpu_buffer->reader_page);
1035
1036 fail_free_buffer:
1037 kfree(cpu_buffer);
1038 return NULL;
1039 }
1040
1041 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1042 {
1043 struct list_head *head = cpu_buffer->pages;
1044 struct buffer_page *bpage, *tmp;
1045
1046 free_buffer_page(cpu_buffer->reader_page);
1047
1048 rb_head_page_deactivate(cpu_buffer);
1049
1050 if (head) {
1051 list_for_each_entry_safe(bpage, tmp, head, list) {
1052 list_del_init(&bpage->list);
1053 free_buffer_page(bpage);
1054 }
1055 bpage = list_entry(head, struct buffer_page, list);
1056 free_buffer_page(bpage);
1057 }
1058
1059 kfree(cpu_buffer);
1060 }
1061
1062 #ifdef CONFIG_HOTPLUG_CPU
1063 static int rb_cpu_notify(struct notifier_block *self,
1064 unsigned long action, void *hcpu);
1065 #endif
1066
1067 /**
1068 * ring_buffer_alloc - allocate a new ring_buffer
1069 * @size: the size in bytes per cpu that is needed.
1070 * @flags: attributes to set for the ring buffer.
1071 *
1072 * Currently the only flag that is available is the RB_FL_OVERWRITE
1073 * flag. This flag means that the buffer will overwrite old data
1074 * when the buffer wraps. If this flag is not set, the buffer will
1075 * drop data when the tail hits the head.
1076 */
1077 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1078 struct lock_class_key *key)
1079 {
1080 struct ring_buffer *buffer;
1081 int bsize;
1082 int cpu;
1083
1084 /* keep it in its own cache line */
1085 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1086 GFP_KERNEL);
1087 if (!buffer)
1088 return NULL;
1089
1090 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1091 goto fail_free_buffer;
1092
1093 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1094 buffer->flags = flags;
1095 buffer->clock = trace_clock_local;
1096 buffer->reader_lock_key = key;
1097
1098 /* need at least two pages */
1099 if (buffer->pages < 2)
1100 buffer->pages = 2;
1101
1102 /*
1103 * If CPU hotplug is not configured and the ring buffer is allocated
1104 * in an early initcall, it will not be notified of secondary cpus.
1105 * In that case, we need to allocate for all possible cpus.
1106 */
1107 #ifdef CONFIG_HOTPLUG_CPU
1108 get_online_cpus();
1109 cpumask_copy(buffer->cpumask, cpu_online_mask);
1110 #else
1111 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1112 #endif
1113 buffer->cpus = nr_cpu_ids;
1114
1115 bsize = sizeof(void *) * nr_cpu_ids;
1116 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1117 GFP_KERNEL);
1118 if (!buffer->buffers)
1119 goto fail_free_cpumask;
1120
1121 for_each_buffer_cpu(buffer, cpu) {
1122 buffer->buffers[cpu] =
1123 rb_allocate_cpu_buffer(buffer, cpu);
1124 if (!buffer->buffers[cpu])
1125 goto fail_free_buffers;
1126 }
1127
1128 #ifdef CONFIG_HOTPLUG_CPU
1129 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1130 buffer->cpu_notify.priority = 0;
1131 register_cpu_notifier(&buffer->cpu_notify);
1132 #endif
1133
1134 put_online_cpus();
1135 mutex_init(&buffer->mutex);
1136
1137 return buffer;
1138
1139 fail_free_buffers:
1140 for_each_buffer_cpu(buffer, cpu) {
1141 if (buffer->buffers[cpu])
1142 rb_free_cpu_buffer(buffer->buffers[cpu]);
1143 }
1144 kfree(buffer->buffers);
1145
1146 fail_free_cpumask:
1147 free_cpumask_var(buffer->cpumask);
1148 put_online_cpus();
1149
1150 fail_free_buffer:
1151 kfree(buffer);
1152 return NULL;
1153 }
1154 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
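/*
 * Example: a tracer that wants one megabyte per cpu, with old data
 * overwritten on wrap, can do
 *
 *	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *
 * using the ring_buffer_alloc() wrapper from <linux/ring_buffer.h>,
 * which supplies the lock_class_key for this function.  The size is
 * rounded up to DIV_ROUND_UP(size, BUF_PAGE_SIZE) pages per cpu.
 */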
1155
1156 /**
1157 * ring_buffer_free - free a ring buffer.
1158 * @buffer: the buffer to free.
1159 */
1160 void
1161 ring_buffer_free(struct ring_buffer *buffer)
1162 {
1163 int cpu;
1164
1165 get_online_cpus();
1166
1167 #ifdef CONFIG_HOTPLUG_CPU
1168 unregister_cpu_notifier(&buffer->cpu_notify);
1169 #endif
1170
1171 for_each_buffer_cpu(buffer, cpu)
1172 rb_free_cpu_buffer(buffer->buffers[cpu]);
1173
1174 put_online_cpus();
1175
1176 kfree(buffer->buffers);
1177 free_cpumask_var(buffer->cpumask);
1178
1179 kfree(buffer);
1180 }
1181 EXPORT_SYMBOL_GPL(ring_buffer_free);
1182
1183 void ring_buffer_set_clock(struct ring_buffer *buffer,
1184 u64 (*clock)(void))
1185 {
1186 buffer->clock = clock;
1187 }
1188
1189 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1190
1191 static void
1192 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1193 {
1194 struct buffer_page *bpage;
1195 struct list_head *p;
1196 unsigned i;
1197
1198 spin_lock_irq(&cpu_buffer->reader_lock);
1199 rb_head_page_deactivate(cpu_buffer);
1200
1201 for (i = 0; i < nr_pages; i++) {
1202 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1203 return;
1204 p = cpu_buffer->pages->next;
1205 bpage = list_entry(p, struct buffer_page, list);
1206 list_del_init(&bpage->list);
1207 free_buffer_page(bpage);
1208 }
1209 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1210 return;
1211
1212 rb_reset_cpu(cpu_buffer);
1213 rb_check_pages(cpu_buffer);
1214
1215 spin_unlock_irq(&cpu_buffer->reader_lock);
1216 }
1217
1218 static void
1219 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1220 struct list_head *pages, unsigned nr_pages)
1221 {
1222 struct buffer_page *bpage;
1223 struct list_head *p;
1224 unsigned i;
1225
1226 spin_lock_irq(&cpu_buffer->reader_lock);
1227 rb_head_page_deactivate(cpu_buffer);
1228
1229 for (i = 0; i < nr_pages; i++) {
1230 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1231 return;
1232 p = pages->next;
1233 bpage = list_entry(p, struct buffer_page, list);
1234 list_del_init(&bpage->list);
1235 list_add_tail(&bpage->list, cpu_buffer->pages);
1236 }
1237 rb_reset_cpu(cpu_buffer);
1238 rb_check_pages(cpu_buffer);
1239
1240 spin_unlock_irq(&cpu_buffer->reader_lock);
1241 }
1242
1243 /**
1244 * ring_buffer_resize - resize the ring buffer
1245 * @buffer: the buffer to resize.
1246 * @size: the new size.
1247 *
1248 * Minimum size is 2 * BUF_PAGE_SIZE.
1249 *
1250 * Returns the new size on success, -ENOMEM or -1 on failure.
1251 */
1252 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1253 {
1254 struct ring_buffer_per_cpu *cpu_buffer;
1255 unsigned nr_pages, rm_pages, new_pages;
1256 struct buffer_page *bpage, *tmp;
1257 unsigned long buffer_size;
1258 unsigned long addr;
1259 LIST_HEAD(pages);
1260 int i, cpu;
1261
1262 /*
1263 * Always succeed at resizing a non-existent buffer:
1264 */
1265 if (!buffer)
1266 return size;
1267
1268 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1269 size *= BUF_PAGE_SIZE;
1270 buffer_size = buffer->pages * BUF_PAGE_SIZE;
1271
1272 /* we need a minimum of two pages */
1273 if (size < BUF_PAGE_SIZE * 2)
1274 size = BUF_PAGE_SIZE * 2;
1275
1276 if (size == buffer_size)
1277 return size;
1278
1279 atomic_inc(&buffer->record_disabled);
1280
1281 /* Make sure all writers are done with this buffer. */
1282 synchronize_sched();
1283
1284 mutex_lock(&buffer->mutex);
1285 get_online_cpus();
1286
1287 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1288
1289 if (size < buffer_size) {
1290
1291 /* easy case, just free pages */
1292 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1293 goto out_fail;
1294
1295 rm_pages = buffer->pages - nr_pages;
1296
1297 for_each_buffer_cpu(buffer, cpu) {
1298 cpu_buffer = buffer->buffers[cpu];
1299 rb_remove_pages(cpu_buffer, rm_pages);
1300 }
1301 goto out;
1302 }
1303
1304 /*
1305 * This is a bit more difficult. We only want to add pages
1306 * when we can allocate enough for all CPUs. We do this
1307 * by allocating all the pages and storing them on a local
1308 * linked list. If we succeed in our allocation, then we
1309 * add these pages to the cpu_buffers. Otherwise we just free
1310 * them all and return -ENOMEM;
1311 */
1312 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1313 goto out_fail;
1314
1315 new_pages = nr_pages - buffer->pages;
1316
1317 for_each_buffer_cpu(buffer, cpu) {
1318 for (i = 0; i < new_pages; i++) {
1319 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
1320 cache_line_size()),
1321 GFP_KERNEL, cpu_to_node(cpu));
1322 if (!bpage)
1323 goto free_pages;
1324 list_add(&bpage->list, &pages);
1325 addr = __get_free_page(GFP_KERNEL);
1326 if (!addr)
1327 goto free_pages;
1328 bpage->page = (void *)addr;
1329 rb_init_page(bpage->page);
1330 }
1331 }
1332
1333 for_each_buffer_cpu(buffer, cpu) {
1334 cpu_buffer = buffer->buffers[cpu];
1335 rb_insert_pages(cpu_buffer, &pages, new_pages);
1336 }
1337
1338 if (RB_WARN_ON(buffer, !list_empty(&pages)))
1339 goto out_fail;
1340
1341 out:
1342 buffer->pages = nr_pages;
1343 put_online_cpus();
1344 mutex_unlock(&buffer->mutex);
1345
1346 atomic_dec(&buffer->record_disabled);
1347
1348 return size;
1349
1350 free_pages:
1351 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1352 list_del_init(&bpage->list);
1353 free_buffer_page(bpage);
1354 }
1355 put_online_cpus();
1356 mutex_unlock(&buffer->mutex);
1357 atomic_dec(&buffer->record_disabled);
1358 return -ENOMEM;
1359
1360 /*
1361 * Something went totally wrong, and we are too paranoid
1362 * to even clean up the mess.
1363 */
1364 out_fail:
1365 put_online_cpus();
1366 mutex_unlock(&buffer->mutex);
1367 atomic_dec(&buffer->record_disabled);
1368 return -1;
1369 }
1370 EXPORT_SYMBOL_GPL(ring_buffer_resize);
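/*
 * Example: on a system with 4K pages, ring_buffer_resize(buffer, 8192)
 * is rounded up to 3 buffer pages per cpu (BUF_PAGE_SIZE is slightly
 * smaller than PAGE_SIZE because of the page header), and that rounded
 * byte size is what gets returned on success.
 */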
1371
1372 static inline void *
1373 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1374 {
1375 return bpage->data + index;
1376 }
1377
1378 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1379 {
1380 return bpage->page->data + index;
1381 }
1382
1383 static inline struct ring_buffer_event *
1384 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1385 {
1386 return __rb_page_index(cpu_buffer->reader_page,
1387 cpu_buffer->reader_page->read);
1388 }
1389
1390 static inline struct ring_buffer_event *
1391 rb_iter_head_event(struct ring_buffer_iter *iter)
1392 {
1393 return __rb_page_index(iter->head_page, iter->head);
1394 }
1395
1396 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1397 {
1398 return local_read(&bpage->write) & RB_WRITE_MASK;
1399 }
1400
1401 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1402 {
1403 return local_read(&bpage->page->commit);
1404 }
1405
1406 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1407 {
1408 return local_read(&bpage->entries) & RB_WRITE_MASK;
1409 }
1410
1411 /* Size is determined by what has been committed */
1412 static inline unsigned rb_page_size(struct buffer_page *bpage)
1413 {
1414 return rb_page_commit(bpage);
1415 }
1416
1417 static inline unsigned
1418 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1419 {
1420 return rb_page_commit(cpu_buffer->commit_page);
1421 }
1422
1423 static inline unsigned
1424 rb_event_index(struct ring_buffer_event *event)
1425 {
1426 unsigned long addr = (unsigned long)event;
1427
1428 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1429 }
1430
1431 static inline int
1432 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1433 struct ring_buffer_event *event)
1434 {
1435 unsigned long addr = (unsigned long)event;
1436 unsigned long index;
1437
1438 index = rb_event_index(event);
1439 addr &= PAGE_MASK;
1440
1441 return cpu_buffer->commit_page->page == (void *)addr &&
1442 rb_commit_index(cpu_buffer) == index;
1443 }
1444
1445 static void
1446 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1447 {
1448 unsigned long max_count;
1449
1450 /*
1451 * We only race with interrupts and NMIs on this CPU.
1452 * If we own the commit event, then we can commit
1453 * all others that interrupted us, since the interruptions
1454 * are in stack format (they finish before they come
1455 * back to us). This allows us to do a simple loop to
1456 * assign the commit to the tail.
1457 */
1458 again:
1459 max_count = cpu_buffer->buffer->pages * 100;
1460
1461 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1462 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1463 return;
1464 if (RB_WARN_ON(cpu_buffer,
1465 rb_is_reader_page(cpu_buffer->tail_page)))
1466 return;
1467 local_set(&cpu_buffer->commit_page->page->commit,
1468 rb_page_write(cpu_buffer->commit_page));
1469 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1470 cpu_buffer->write_stamp =
1471 cpu_buffer->commit_page->page->time_stamp;
1472 /* add barrier to keep gcc from optimizing too much */
1473 barrier();
1474 }
1475 while (rb_commit_index(cpu_buffer) !=
1476 rb_page_write(cpu_buffer->commit_page)) {
1477
1478 local_set(&cpu_buffer->commit_page->page->commit,
1479 rb_page_write(cpu_buffer->commit_page));
1480 RB_WARN_ON(cpu_buffer,
1481 local_read(&cpu_buffer->commit_page->page->commit) &
1482 ~RB_WRITE_MASK);
1483 barrier();
1484 }
1485
1486 /* again, keep gcc from optimizing */
1487 barrier();
1488
1489 /*
1490 * If an interrupt came in just after the first while loop
1491 * and pushed the tail page forward, we will be left with
1492 * a dangling commit that will never go forward.
1493 */
1494 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1495 goto again;
1496 }
1497
1498 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1499 {
1500 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1501 cpu_buffer->reader_page->read = 0;
1502 }
1503
1504 static void rb_inc_iter(struct ring_buffer_iter *iter)
1505 {
1506 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1507
1508 /*
1509 * The iterator could be on the reader page (it starts there).
1510 * But the head could have moved, since the reader was
1511 * found. Check for this case and assign the iterator
1512 * to the head page instead of next.
1513 */
1514 if (iter->head_page == cpu_buffer->reader_page)
1515 iter->head_page = rb_set_head_page(cpu_buffer);
1516 else
1517 rb_inc_page(cpu_buffer, &iter->head_page);
1518
1519 iter->read_stamp = iter->head_page->page->time_stamp;
1520 iter->head = 0;
1521 }
1522
1523 /**
1524 * rb_update_event - update event type and data
1525 * @event: the event to update
1526 * @type: the type of event
1527 * @length: the size of the event field in the ring buffer
1528 *
1529 * Update the type and data fields of the event. The length
1530 * is the actual size that is written to the ring buffer,
1531 * and with this, we can determine what to place into the
1532 * data field.
1533 */
1534 static void
1535 rb_update_event(struct ring_buffer_event *event,
1536 unsigned type, unsigned length)
1537 {
1538 event->type_len = type;
1539
1540 switch (type) {
1541
1542 case RINGBUF_TYPE_PADDING:
1543 case RINGBUF_TYPE_TIME_EXTEND:
1544 case RINGBUF_TYPE_TIME_STAMP:
1545 break;
1546
1547 case 0:
1548 length -= RB_EVNT_HDR_SIZE;
1549 if (length > RB_MAX_SMALL_DATA)
1550 event->array[0] = length;
1551 else
1552 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1553 break;
1554 default:
1555 BUG();
1556 }
1557 }
1558
1559 /*
1560 * rb_handle_head_page - writer hit the head page
1561 *
1562 * Returns: +1 to retry page
1563 * 0 to continue
1564 * -1 on error
1565 */
1566 static int
1567 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1568 struct buffer_page *tail_page,
1569 struct buffer_page *next_page)
1570 {
1571 struct buffer_page *new_head;
1572 int entries;
1573 int type;
1574 int ret;
1575
1576 entries = rb_page_entries(next_page);
1577
1578 /*
1579 * The hard part is here. We need to move the head
1580 * forward, and protect against both readers on
1581 * other CPUs and writers coming in via interrupts.
1582 */
1583 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1584 RB_PAGE_HEAD);
1585
1586 /*
1587 * type can be one of four:
1588 * NORMAL - an interrupt already moved it for us
1589 * HEAD - we are the first to get here.
1590 * UPDATE - we are the interrupt interrupting
1591 * a current move.
1592 * MOVED - a reader on another CPU moved the next
1593 * pointer to its reader page. Give up
1594 * and try again.
1595 */
1596
1597 switch (type) {
1598 case RB_PAGE_HEAD:
1599 /*
1600 * We changed the head to UPDATE, thus
1601 * it is our responsibility to update
1602 * the counters.
1603 */
1604 local_add(entries, &cpu_buffer->overrun);
1605
1606 /*
1607 * The entries will be zeroed out when we move the
1608 * tail page.
1609 */
1610
1611 /* still more to do */
1612 break;
1613
1614 case RB_PAGE_UPDATE:
1615 /*
1616 * This is an interrupt that interrupted the
1617 * previous update. Still more to do.
1618 */
1619 break;
1620 case RB_PAGE_NORMAL:
1621 /*
1622 * An interrupt came in before the update
1623 * and processed this for us.
1624 * Nothing left to do.
1625 */
1626 return 1;
1627 case RB_PAGE_MOVED:
1628 /*
1629 * The reader is on another CPU and just did
1630 * a swap with our next_page.
1631 * Try again.
1632 */
1633 return 1;
1634 default:
1635 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1636 return -1;
1637 }
1638
1639 /*
1640 * Now that we are here, the old head pointer is
1641 * set to UPDATE. This will keep the reader from
1642 * swapping the head page with the reader page.
1643 * The reader (on another CPU) will spin till
1644 * we are finished.
1645 *
1646 * We just need to protect against interrupts
1647 * doing the job. We will set the next pointer
1648 * to HEAD. After that, we set the old pointer
1649 * to NORMAL, but only if it was HEAD before;
1650 * otherwise we are an interrupt, and only
1651 * want the outermost commit to reset it.
1652 */
1653 new_head = next_page;
1654 rb_inc_page(cpu_buffer, &new_head);
1655
1656 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1657 RB_PAGE_NORMAL);
1658
1659 /*
1660 * Valid returns are:
1661 * HEAD - an interrupt came in and already set it.
1662 * NORMAL - One of two things:
1663 * 1) We really set it.
1664 * 2) A bunch of interrupts came in and moved
1665 * the page forward again.
1666 */
1667 switch (ret) {
1668 case RB_PAGE_HEAD:
1669 case RB_PAGE_NORMAL:
1670 /* OK */
1671 break;
1672 default:
1673 RB_WARN_ON(cpu_buffer, 1);
1674 return -1;
1675 }
1676
1677 /*
1678 * It is possible that an interrupt came in,
1679 * set the head up, then more interrupts came in
1680 * and moved it again. When we get back here,
1681 * the page would have been set to NORMAL but we
1682 * just set it back to HEAD.
1683 *
1684 * How do you detect this? Well, if that happened
1685 * the tail page would have moved.
1686 */
1687 if (ret == RB_PAGE_NORMAL) {
1688 /*
1689 * If the tail had moved past next, then we need
1690 * to reset the pointer.
1691 */
1692 if (cpu_buffer->tail_page != tail_page &&
1693 cpu_buffer->tail_page != next_page)
1694 rb_head_page_set_normal(cpu_buffer, new_head,
1695 next_page,
1696 RB_PAGE_HEAD);
1697 }
1698
1699 /*
1700 * If this was the outer most commit (the one that
1701 * changed the original pointer from HEAD to UPDATE),
1702 * then it is up to us to reset it to NORMAL.
1703 */
1704 if (type == RB_PAGE_HEAD) {
1705 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1706 tail_page,
1707 RB_PAGE_UPDATE);
1708 if (RB_WARN_ON(cpu_buffer,
1709 ret != RB_PAGE_UPDATE))
1710 return -1;
1711 }
1712
1713 return 0;
1714 }
1715
1716 static unsigned rb_calculate_event_length(unsigned length)
1717 {
1718 struct ring_buffer_event event; /* Used only for sizeof array */
1719
1720 /* zero length can cause confusion */
1721 if (!length)
1722 length = 1;
1723
1724 if (length > RB_MAX_SMALL_DATA)
1725 length += sizeof(event.array[0]);
1726
1727 length += RB_EVNT_HDR_SIZE;
1728 length = ALIGN(length, RB_ALIGNMENT);
1729
1730 return length;
1731 }
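/*
 * Example: a requested length of 100 becomes 100 + RB_EVNT_HDR_SIZE = 104
 * bytes on the page (already 4-byte aligned); a requested length of 200
 * exceeds RB_MAX_SMALL_DATA, so the length word in array[0] is added as
 * well, giving 200 + 4 + RB_EVNT_HDR_SIZE = 208 bytes reserved.
 */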
1732
1733 static inline void
1734 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1735 struct buffer_page *tail_page,
1736 unsigned long tail, unsigned long length)
1737 {
1738 struct ring_buffer_event *event;
1739
1740 /*
1741 * Only the event that crossed the page boundary
1742 * must fill the old tail_page with padding.
1743 */
1744 if (tail >= BUF_PAGE_SIZE) {
1745 local_sub(length, &tail_page->write);
1746 return;
1747 }
1748
1749 event = __rb_page_index(tail_page, tail);
1750 kmemcheck_annotate_bitfield(event, bitfield);
1751
1752 /*
1753 * If this event is bigger than the minimum size, then
1754 * we need to be careful that we don't subtract the
1755 * write counter enough to allow another writer to slip
1756 * in on this page.
1757 * We put in a discarded commit instead, to make sure
1758 * that this space is not used again.
1759 *
1760 * If we are less than the minimum size, we don't need to
1761 * worry about it.
1762 */
1763 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1764 /* No room for any events */
1765
1766 /* Mark the rest of the page with padding */
1767 rb_event_set_padding(event);
1768
1769 /* Set the write back to the previous setting */
1770 local_sub(length, &tail_page->write);
1771 return;
1772 }
1773
1774 /* Put in a discarded event */
1775 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1776 event->type_len = RINGBUF_TYPE_PADDING;
1777 /* time delta must be non zero */
1778 event->time_delta = 1;
1779
1780 /* Set write to end of buffer */
1781 length = (tail + length) - BUF_PAGE_SIZE;
1782 local_sub(length, &tail_page->write);
1783 }
1784
1785 static struct ring_buffer_event *
1786 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1787 unsigned long length, unsigned long tail,
1788 struct buffer_page *tail_page, u64 *ts)
1789 {
1790 struct buffer_page *commit_page = cpu_buffer->commit_page;
1791 struct ring_buffer *buffer = cpu_buffer->buffer;
1792 struct buffer_page *next_page;
1793 int ret;
1794
1795 next_page = tail_page;
1796
1797 rb_inc_page(cpu_buffer, &next_page);
1798
1799 /*
1800 * If for some reason, we had an interrupt storm that made
1801 * it all the way around the buffer, bail, and warn
1802 * about it.
1803 */
1804 if (unlikely(next_page == commit_page)) {
1805 local_inc(&cpu_buffer->commit_overrun);
1806 goto out_reset;
1807 }
1808
1809 /*
1810 * This is where the fun begins!
1811 *
1812 * We are fighting against races between a reader that
1813 * could be on another CPU trying to swap its reader
1814 * page with the buffer head.
1815 *
1816 * We are also fighting against interrupts coming in and
1817 * moving the head or tail on us as well.
1818 *
1819 * If the next page is the head page then we have filled
1820 * the buffer, unless the commit page is still on the
1821 * reader page.
1822 */
1823 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
1824
1825 /*
1826 * If the commit is not on the reader page, then
1827 * move the head page.
1828 */
1829 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1830 /*
1831 * If we are not in overwrite mode,
1832 * this is easy, just stop here.
1833 */
1834 if (!(buffer->flags & RB_FL_OVERWRITE))
1835 goto out_reset;
1836
1837 ret = rb_handle_head_page(cpu_buffer,
1838 tail_page,
1839 next_page);
1840 if (ret < 0)
1841 goto out_reset;
1842 if (ret)
1843 goto out_again;
1844 } else {
1845 /*
1846 * We need to be careful here too. The
1847 * commit page could still be on the reader
1848 * page. We could have a small buffer, and
1849 * have filled up the buffer with events
1850 * from interrupts and such, and wrapped.
1851 *
1852 * Note, if the tail page is also on the
1853 * reader_page, we let it move out.
1854 */
1855 if (unlikely((cpu_buffer->commit_page !=
1856 cpu_buffer->tail_page) &&
1857 (cpu_buffer->commit_page ==
1858 cpu_buffer->reader_page))) {
1859 local_inc(&cpu_buffer->commit_overrun);
1860 goto out_reset;
1861 }
1862 }
1863 }
1864
1865 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1866 if (ret) {
1867 /*
1868 * Nested commits always have zero deltas, so
1869 * just reread the time stamp
1870 */
1871 *ts = rb_time_stamp(buffer);
1872 next_page->page->time_stamp = *ts;
1873 }
1874
1875 out_again:
1876
1877 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1878
1879 /* fail and let the caller try again */
1880 return ERR_PTR(-EAGAIN);
1881
1882 out_reset:
1883 /* reset write */
1884 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1885
1886 return NULL;
1887 }
1888
1889 static struct ring_buffer_event *
1890 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1891 unsigned type, unsigned long length, u64 *ts)
1892 {
1893 struct buffer_page *tail_page;
1894 struct ring_buffer_event *event;
1895 unsigned long tail, write;
1896
1897 tail_page = cpu_buffer->tail_page;
1898 write = local_add_return(length, &tail_page->write);
1899
1900 /* set write to only the index of the write */
1901 write &= RB_WRITE_MASK;
1902 tail = write - length;
1903
1904 /* See if we shot past the end of this buffer page */
1905 if (write > BUF_PAGE_SIZE)
1906 return rb_move_tail(cpu_buffer, length, tail,
1907 tail_page, ts);
1908
1909 /* We reserved something on the buffer */
1910
1911 event = __rb_page_index(tail_page, tail);
1912 kmemcheck_annotate_bitfield(event, bitfield);
1913 rb_update_event(event, type, length);
1914
1915 /* The passed in type is zero for DATA */
1916 if (likely(!type))
1917 local_inc(&tail_page->entries);
1918
1919 /*
1920 * If this is the first commit on the page, then update
1921 * its timestamp.
1922 */
1923 if (!tail)
1924 tail_page->page->time_stamp = *ts;
1925
1926 return event;
1927 }
1928
1929 static inline int
1930 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1931 struct ring_buffer_event *event)
1932 {
1933 unsigned long new_index, old_index;
1934 struct buffer_page *bpage;
1935 unsigned long index;
1936 unsigned long addr;
1937
1938 new_index = rb_event_index(event);
1939 old_index = new_index + rb_event_length(event);
1940 addr = (unsigned long)event;
1941 addr &= PAGE_MASK;
1942
1943 bpage = cpu_buffer->tail_page;
1944
1945 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1946 unsigned long write_mask =
1947 local_read(&bpage->write) & ~RB_WRITE_MASK;
1948 /*
1949 * This is on the tail page. It is possible that
1950 * a write could come in and move the tail page
1951 * and write to the next page. That is fine
1952 * because we just shorten what is on this page.
1953 */
1954 old_index += write_mask;
1955 new_index += write_mask;
1956 index = local_cmpxchg(&bpage->write, old_index, new_index);
1957 if (index == old_index)
1958 return 1;
1959 }
1960
1961 /* could not discard */
1962 return 0;
1963 }
1964
1965 static int
1966 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1967 u64 *ts, u64 *delta)
1968 {
1969 struct ring_buffer_event *event;
1970 static int once;
1971 int ret;
1972
1973 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1974 printk(KERN_WARNING "Delta way too big! %llu"
1975 " ts=%llu write stamp = %llu\n",
1976 (unsigned long long)*delta,
1977 (unsigned long long)*ts,
1978 (unsigned long long)cpu_buffer->write_stamp);
1979 WARN_ON(1);
1980 }
1981
1982 /*
1983 * The delta is too big, we need to add a
1984 * new timestamp.
1985 */
1986 event = __rb_reserve_next(cpu_buffer,
1987 RINGBUF_TYPE_TIME_EXTEND,
1988 RB_LEN_TIME_EXTEND,
1989 ts);
1990 if (!event)
1991 return -EBUSY;
1992
1993 if (PTR_ERR(event) == -EAGAIN)
1994 return -EAGAIN;
1995
1996 /* Only a committed time event can update the write stamp */
1997 if (rb_event_is_commit(cpu_buffer, event)) {
1998 /*
1999 * If this is the first on the page, then it was
2000 * updated with the page itself. Try to discard it
2001 * and if we can't just make it zero.
2002 */
2003 if (rb_event_index(event)) {
2004 event->time_delta = *delta & TS_MASK;
2005 event->array[0] = *delta >> TS_SHIFT;
2006 } else {
2007 /* try to discard, since we do not need this */
2008 if (!rb_try_to_discard(cpu_buffer, event)) {
2009 /* nope, just zero it */
2010 event->time_delta = 0;
2011 event->array[0] = 0;
2012 }
2013 }
2014 cpu_buffer->write_stamp = *ts;
2015 /* let the caller know this was the commit */
2016 ret = 1;
2017 } else {
2018 /* Try to discard the event */
2019 if (!rb_try_to_discard(cpu_buffer, event)) {
2020 /* Darn, this is just wasted space */
2021 event->time_delta = 0;
2022 event->array[0] = 0;
2023 }
2024 ret = 0;
2025 }
2026
2027 *delta = 0;
2028
2029 return ret;
2030 }
2031
2032 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2033 {
2034 local_inc(&cpu_buffer->committing);
2035 local_inc(&cpu_buffer->commits);
2036 }
2037
2038 static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2039 {
2040 unsigned long commits;
2041
2042 if (RB_WARN_ON(cpu_buffer,
2043 !local_read(&cpu_buffer->committing)))
2044 return;
2045
2046 again:
2047 commits = local_read(&cpu_buffer->commits);
2048 /* synchronize with interrupts */
2049 barrier();
2050 if (local_read(&cpu_buffer->committing) == 1)
2051 rb_set_commit_to_write(cpu_buffer);
2052
2053 local_dec(&cpu_buffer->committing);
2054
2055 /* synchronize with interrupts */
2056 barrier();
2057
2058 /*
2059 * Need to account for interrupts coming in between the
2060 * updating of the commit page and the clearing of the
2061 * committing counter.
2062 */
2063 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2064 !local_read(&cpu_buffer->committing)) {
2065 local_inc(&cpu_buffer->committing);
2066 goto again;
2067 }
2068 }
2069
2070 static struct ring_buffer_event *
2071 rb_reserve_next_event(struct ring_buffer *buffer,
2072 struct ring_buffer_per_cpu *cpu_buffer,
2073 unsigned long length)
2074 {
2075 struct ring_buffer_event *event;
2076 u64 ts, delta = 0;
2077 int commit = 0;
2078 int nr_loops = 0;
2079
2080 rb_start_commit(cpu_buffer);
2081
2082 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2083 /*
2084 * Due to the ability to swap a cpu buffer out of a ring buffer,
2085 * it is possible it was swapped before we committed.
2086 * (committing stops a swap). We check for it here and
2087 * if it happened, we have to fail the write.
2088 */
2089 barrier();
2090 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2091 local_dec(&cpu_buffer->committing);
2092 local_dec(&cpu_buffer->commits);
2093 return NULL;
2094 }
2095 #endif
2096
2097 length = rb_calculate_event_length(length);
2098 again:
2099 /*
2100 * We allow for interrupts to reenter here and do a trace.
2101 * If one does, it will cause this original code to loop
2102 * back here. Even with heavy interrupts happening, this
2103 * should only happen a few times in a row. If this happens
2104 * 1000 times in a row, there must be either an interrupt
2105 * storm or we have something buggy.
2106 * Bail!
2107 */
2108 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2109 goto out_fail;
2110
2111 ts = rb_time_stamp(cpu_buffer->buffer);
2112
2113 /*
2114 * Only the first commit can update the timestamp.
2115 * Yes there is a race here. If an interrupt comes in
2116 * just after the conditional and it traces too, then it
2117 * will also check the deltas. More than one timestamp may
2118 * also be made. But only the entry that did the actual
2119 * commit will be something other than zero.
2120 */
2121 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
2122 rb_page_write(cpu_buffer->tail_page) ==
2123 rb_commit_index(cpu_buffer))) {
2124 u64 diff;
2125
2126 diff = ts - cpu_buffer->write_stamp;
2127
2128 /* make sure this diff is calculated here */
2129 barrier();
2130
2131 /* Did the write stamp get updated already? */
2132 if (unlikely(ts < cpu_buffer->write_stamp))
2133 goto get_event;
2134
2135 delta = diff;
2136 if (unlikely(test_time_stamp(delta))) {
2137
2138 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
2139 if (commit == -EBUSY)
2140 goto out_fail;
2141
2142 if (commit == -EAGAIN)
2143 goto again;
2144
2145 RB_WARN_ON(cpu_buffer, commit < 0);
2146 }
2147 }
2148
2149 get_event:
2150 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
2151 if (unlikely(PTR_ERR(event) == -EAGAIN))
2152 goto again;
2153
2154 if (!event)
2155 goto out_fail;
2156
2157 if (!rb_event_is_commit(cpu_buffer, event))
2158 delta = 0;
2159
2160 event->time_delta = delta;
2161
2162 return event;
2163
2164 out_fail:
2165 rb_end_commit(cpu_buffer);
2166 return NULL;
2167 }
2168
2169 #ifdef CONFIG_TRACING
2170
2171 #define TRACE_RECURSIVE_DEPTH 16
2172
2173 static int trace_recursive_lock(void)
2174 {
2175 current->trace_recursion++;
2176
2177 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
2178 return 0;
2179
2180 /* Disable all tracing before we do anything else */
2181 tracing_off_permanent();
2182
2183 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2184 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2185 current->trace_recursion,
2186 hardirq_count() >> HARDIRQ_SHIFT,
2187 softirq_count() >> SOFTIRQ_SHIFT,
2188 in_nmi());
2189
2190 WARN_ON_ONCE(1);
2191 return -1;
2192 }
2193
2194 static void trace_recursive_unlock(void)
2195 {
2196 WARN_ON_ONCE(!current->trace_recursion);
2197
2198 current->trace_recursion--;
2199 }
2200
2201 #else
2202
2203 #define trace_recursive_lock() (0)
2204 #define trace_recursive_unlock() do { } while (0)
2205
2206 #endif
2207
2208 static DEFINE_PER_CPU(int, rb_need_resched);
2209
2210 /**
2211 * ring_buffer_lock_reserve - reserve a part of the buffer
2212 * @buffer: the ring buffer to reserve from
2213 * @length: the length of the data to reserve (excluding event header)
2214 *
2215 * Returns a reserved event on the ring buffer to copy directly to.
2216 * The user of this interface will need to get the body to write into
2217 * and can use the ring_buffer_event_data() interface.
2218 *
2219 * The length is the length of the data needed, not the event length
2220 * which also includes the event header.
2221 *
2222 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2223 * If NULL is returned, then nothing has been allocated or locked.
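*
* A minimal usage sketch (struct my_entry and its fields are hypothetical;
* error handling is trimmed):
*
* 	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
* 	if (!event)
* 		return -EBUSY;
* 	entry = ring_buffer_event_data(event);
* 	entry->field = value;
* 	ring_buffer_unlock_commit(buffer, event);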
2224 */
2225 struct ring_buffer_event *
2226 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2227 {
2228 struct ring_buffer_per_cpu *cpu_buffer;
2229 struct ring_buffer_event *event;
2230 int cpu, resched;
2231
2232 if (ring_buffer_flags != RB_BUFFERS_ON)
2233 return NULL;
2234
2235 if (atomic_read(&buffer->record_disabled))
2236 return NULL;
2237
2238 /* If we are tracing schedule, we don't want to recurse */
2239 resched = ftrace_preempt_disable();
2240
2241 if (trace_recursive_lock())
2242 goto out_nocheck;
2243
2244 cpu = raw_smp_processor_id();
2245
2246 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2247 goto out;
2248
2249 cpu_buffer = buffer->buffers[cpu];
2250
2251 if (atomic_read(&cpu_buffer->record_disabled))
2252 goto out;
2253
2254 if (length > BUF_MAX_DATA_SIZE)
2255 goto out;
2256
2257 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2258 if (!event)
2259 goto out;
2260
2261 /*
2262 * Need to store resched state on this cpu.
2263 * Only the first needs to.
2264 */
2265
2266 if (preempt_count() == 1)
2267 per_cpu(rb_need_resched, cpu) = resched;
2268
2269 return event;
2270
2271 out:
2272 trace_recursive_unlock();
2273
2274 out_nocheck:
2275 ftrace_preempt_enable(resched);
2276 return NULL;
2277 }
2278 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2279
2280 static void
2281 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2282 struct ring_buffer_event *event)
2283 {
2284 /*
2285 * The first event on the commit queue updates the
2286 * time stamp.
2287 */
2288 if (rb_event_is_commit(cpu_buffer, event))
2289 cpu_buffer->write_stamp += event->time_delta;
2290 }
2291
2292 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2293 struct ring_buffer_event *event)
2294 {
2295 local_inc(&cpu_buffer->entries);
2296 rb_update_write_stamp(cpu_buffer, event);
2297 rb_end_commit(cpu_buffer);
2298 }
2299
2300 /**
2301 * ring_buffer_unlock_commit - commit a reserved event
2302 * @buffer: The buffer to commit to
2303 * @event: The event pointer to commit.
2304 *
2305 * This commits the data to the ring buffer, and releases any locks held.
2306 *
2307 * Must be paired with ring_buffer_lock_reserve.
2308 */
2309 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2310 struct ring_buffer_event *event)
2311 {
2312 struct ring_buffer_per_cpu *cpu_buffer;
2313 int cpu = raw_smp_processor_id();
2314
2315 cpu_buffer = buffer->buffers[cpu];
2316
2317 rb_commit(cpu_buffer, event);
2318
2319 trace_recursive_unlock();
2320
2321 /*
2322 * Only the last preempt count needs to restore preemption.
2323 */
2324 if (preempt_count() == 1)
2325 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2326 else
2327 preempt_enable_no_resched_notrace();
2328
2329 return 0;
2330 }
2331 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2332
2333 static inline void rb_event_discard(struct ring_buffer_event *event)
2334 {
2335 /* array[0] holds the actual length for the discarded event */
2336 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2337 event->type_len = RINGBUF_TYPE_PADDING;
2338 /* time delta must be non zero */
2339 if (!event->time_delta)
2340 event->time_delta = 1;
2341 }
2342
2343 /*
2344 * Decrement the entries to the page that an event is on.
2345 * The event does not even need to exist, only the pointer
2346 * to the page it is on. This may only be called before the commit
2347 * takes place.
2348 */
2349 static inline void
2350 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2351 struct ring_buffer_event *event)
2352 {
2353 unsigned long addr = (unsigned long)event;
2354 struct buffer_page *bpage = cpu_buffer->commit_page;
2355 struct buffer_page *start;
2356
2357 addr &= PAGE_MASK;
2358
2359 /* Do the likely case first */
2360 if (likely(bpage->page == (void *)addr)) {
2361 local_dec(&bpage->entries);
2362 return;
2363 }
2364
2365 /*
2366 * Because the commit page may be on the reader page we
2367 * start with the next page and check for the end of the loop there.
2368 */
2369 rb_inc_page(cpu_buffer, &bpage);
2370 start = bpage;
2371 do {
2372 if (bpage->page == (void *)addr) {
2373 local_dec(&bpage->entries);
2374 return;
2375 }
2376 rb_inc_page(cpu_buffer, &bpage);
2377 } while (bpage != start);
2378
2379 /* commit not part of this buffer?? */
2380 RB_WARN_ON(cpu_buffer, 1);
2381 }
2382
2383 /**
2384 * ring_buffer_commit_discard - discard an event that has not been committed
2385 * @buffer: the ring buffer
2386 * @event: non committed event to discard
2387 *
2388 * Sometimes an event that is in the ring buffer needs to be ignored.
2389 * This function lets the user discard an event in the ring buffer
2390 * and then that event will not be read later.
2391 *
2392 * This function only works if it is called before the item has been
2393 * committed. It will try to free the event from the ring buffer
2394 * if another event has not been added behind it.
2395 *
2396 * If another event has been added behind it, it will set the event
2397 * up as discarded, and perform the commit.
2398 *
2399 * If this function is called, do not call ring_buffer_unlock_commit on
2400 * the event.
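*
* A rough reserve-then-discard sketch (drop_it stands for whatever test the
* caller uses to decide the event is unwanted):
*
* 	event = ring_buffer_lock_reserve(buffer, len);
* 	if (!event)
* 		return;
* 	... fill in ring_buffer_event_data(event) ...
* 	if (drop_it)
* 		ring_buffer_discard_commit(buffer, event);
* 	else
* 		ring_buffer_unlock_commit(buffer, event);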
2401 */
2402 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2403 struct ring_buffer_event *event)
2404 {
2405 struct ring_buffer_per_cpu *cpu_buffer;
2406 int cpu;
2407
2408 /* The event is discarded regardless */
2409 rb_event_discard(event);
2410
2411 cpu = smp_processor_id();
2412 cpu_buffer = buffer->buffers[cpu];
2413
2414 /*
2415 * This must only be called if the event has not been
2416 * committed yet. Thus we can assume that preemption
2417 * is still disabled.
2418 */
2419 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2420
2421 rb_decrement_entry(cpu_buffer, event);
2422 if (rb_try_to_discard(cpu_buffer, event))
2423 goto out;
2424
2425 /*
2426 * The commit is still visible to the reader, so we
2427 * must still update the timestamp.
2428 */
2429 rb_update_write_stamp(cpu_buffer, event);
2430 out:
2431 rb_end_commit(cpu_buffer);
2432
2433 trace_recursive_unlock();
2434
2435 /*
2436 * Only the last preempt count needs to restore preemption.
2437 */
2438 if (preempt_count() == 1)
2439 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2440 else
2441 preempt_enable_no_resched_notrace();
2442
2443 }
2444 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2445
2446 /**
2447 * ring_buffer_write - write data to the buffer without reserving
2448 * @buffer: The ring buffer to write to.
2449 * @length: The length of the data being written (excluding the event header)
2450 * @data: The data to write to the buffer.
2451 *
2452 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2453 * one function. If you already have the data to write to the buffer, it
2454 * may be easier to simply call this function.
2455 *
2456 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2457 * and not the length of the event which would hold the header.
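*
* A minimal sketch (my_data is a hypothetical caller-side structure):
*
* 	if (ring_buffer_write(buffer, sizeof(my_data), &my_data))
* 		... recording was off or the write failed ...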
2458 */
2459 int ring_buffer_write(struct ring_buffer *buffer,
2460 unsigned long length,
2461 void *data)
2462 {
2463 struct ring_buffer_per_cpu *cpu_buffer;
2464 struct ring_buffer_event *event;
2465 void *body;
2466 int ret = -EBUSY;
2467 int cpu, resched;
2468
2469 if (ring_buffer_flags != RB_BUFFERS_ON)
2470 return -EBUSY;
2471
2472 if (atomic_read(&buffer->record_disabled))
2473 return -EBUSY;
2474
2475 resched = ftrace_preempt_disable();
2476
2477 cpu = raw_smp_processor_id();
2478
2479 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2480 goto out;
2481
2482 cpu_buffer = buffer->buffers[cpu];
2483
2484 if (atomic_read(&cpu_buffer->record_disabled))
2485 goto out;
2486
2487 if (length > BUF_MAX_DATA_SIZE)
2488 goto out;
2489
2490 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2491 if (!event)
2492 goto out;
2493
2494 body = rb_event_data(event);
2495
2496 memcpy(body, data, length);
2497
2498 rb_commit(cpu_buffer, event);
2499
2500 ret = 0;
2501 out:
2502 ftrace_preempt_enable(resched);
2503
2504 return ret;
2505 }
2506 EXPORT_SYMBOL_GPL(ring_buffer_write);
2507
2508 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2509 {
2510 struct buffer_page *reader = cpu_buffer->reader_page;
2511 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2512 struct buffer_page *commit = cpu_buffer->commit_page;
2513
2514 /* In case of error, head will be NULL */
2515 if (unlikely(!head))
2516 return 1;
2517
2518 return reader->read == rb_page_commit(reader) &&
2519 (commit == reader ||
2520 (commit == head &&
2521 head->read == rb_page_commit(commit)));
2522 }
2523
2524 /**
2525 * ring_buffer_record_disable - stop all writes into the buffer
2526 * @buffer: The ring buffer to stop writes to.
2527 *
2528 * This prevents all writes to the buffer. Any attempt to write
2529 * to the buffer after this will fail and return NULL.
2530 *
2531 * The caller should call synchronize_sched() after this.
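*
* A typical quiescing sequence (sketch only):
*
* 	ring_buffer_record_disable(buffer);
* 	synchronize_sched();
* 	... read or reset the buffer safely ...
* 	ring_buffer_record_enable(buffer);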
2532 */
2533 void ring_buffer_record_disable(struct ring_buffer *buffer)
2534 {
2535 atomic_inc(&buffer->record_disabled);
2536 }
2537 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2538
2539 /**
2540 * ring_buffer_record_enable - enable writes to the buffer
2541 * @buffer: The ring buffer to enable writes
2542 *
2543 * Note, multiple disables will need the same number of enables
2544 * to truly enable the writing (much like preempt_disable).
2545 */
2546 void ring_buffer_record_enable(struct ring_buffer *buffer)
2547 {
2548 atomic_dec(&buffer->record_disabled);
2549 }
2550 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2551
2552 /**
2553 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2554 * @buffer: The ring buffer to stop writes to.
2555 * @cpu: The CPU buffer to stop
2556 *
2557 * This prevents all writes to the buffer. Any attempt to write
2558 * to the buffer after this will fail and return NULL.
2559 *
2560 * The caller should call synchronize_sched() after this.
2561 */
2562 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2563 {
2564 struct ring_buffer_per_cpu *cpu_buffer;
2565
2566 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2567 return;
2568
2569 cpu_buffer = buffer->buffers[cpu];
2570 atomic_inc(&cpu_buffer->record_disabled);
2571 }
2572 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2573
2574 /**
2575 * ring_buffer_record_enable_cpu - enable writes to the buffer
2576 * @buffer: The ring buffer to enable writes
2577 * @cpu: The CPU to enable.
2578 *
2579 * Note, multiple disables will need the same number of enables
2580 * to truly enable the writing (much like preempt_disable).
2581 */
2582 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2583 {
2584 struct ring_buffer_per_cpu *cpu_buffer;
2585
2586 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2587 return;
2588
2589 cpu_buffer = buffer->buffers[cpu];
2590 atomic_dec(&cpu_buffer->record_disabled);
2591 }
2592 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2593
2594 /**
2595 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2596 * @buffer: The ring buffer
2597 * @cpu: The per CPU buffer to get the entries from.
2598 */
2599 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2600 {
2601 struct ring_buffer_per_cpu *cpu_buffer;
2602 unsigned long ret;
2603
2604 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2605 return 0;
2606
2607 cpu_buffer = buffer->buffers[cpu];
2608 ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
2609 - cpu_buffer->read;
2610
2611 return ret;
2612 }
2613 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
2614
2615 /**
2616 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2617 * @buffer: The ring buffer
2618 * @cpu: The per CPU buffer to get the number of overruns from
2619 */
2620 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2621 {
2622 struct ring_buffer_per_cpu *cpu_buffer;
2623 unsigned long ret;
2624
2625 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2626 return 0;
2627
2628 cpu_buffer = buffer->buffers[cpu];
2629 ret = local_read(&cpu_buffer->overrun);
2630
2631 return ret;
2632 }
2633 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
2634
2635 /**
2636 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2637 * @buffer: The ring buffer
2638 * @cpu: The per CPU buffer to get the number of overruns from
2639 */
2640 unsigned long
2641 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2642 {
2643 struct ring_buffer_per_cpu *cpu_buffer;
2644 unsigned long ret;
2645
2646 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2647 return 0;
2648
2649 cpu_buffer = buffer->buffers[cpu];
2650 ret = local_read(&cpu_buffer->commit_overrun);
2651
2652 return ret;
2653 }
2654 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2655
2656 /**
2657 * ring_buffer_entries - get the number of entries in a buffer
2658 * @buffer: The ring buffer
2659 *
2660 * Returns the total number of entries in the ring buffer
2661 * (all CPU entries)
2662 */
2663 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2664 {
2665 struct ring_buffer_per_cpu *cpu_buffer;
2666 unsigned long entries = 0;
2667 int cpu;
2668
2669 /* if you care about this being correct, lock the buffer */
2670 for_each_buffer_cpu(buffer, cpu) {
2671 cpu_buffer = buffer->buffers[cpu];
2672 entries += (local_read(&cpu_buffer->entries) -
2673 local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
2674 }
2675
2676 return entries;
2677 }
2678 EXPORT_SYMBOL_GPL(ring_buffer_entries);
2679
2680 /**
2681 * ring_buffer_overruns - get the number of overruns in buffer
2682 * @buffer: The ring buffer
2683 *
2684 * Returns the total number of overruns in the ring buffer
2685 * (all CPU entries)
2686 */
2687 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2688 {
2689 struct ring_buffer_per_cpu *cpu_buffer;
2690 unsigned long overruns = 0;
2691 int cpu;
2692
2693 /* if you care about this being correct, lock the buffer */
2694 for_each_buffer_cpu(buffer, cpu) {
2695 cpu_buffer = buffer->buffers[cpu];
2696 overruns += local_read(&cpu_buffer->overrun);
2697 }
2698
2699 return overruns;
2700 }
2701 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
2702
2703 static void rb_iter_reset(struct ring_buffer_iter *iter)
2704 {
2705 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2706
2707 /* Iterator usage is expected to have record disabled */
2708 if (list_empty(&cpu_buffer->reader_page->list)) {
2709 iter->head_page = rb_set_head_page(cpu_buffer);
2710 if (unlikely(!iter->head_page))
2711 return;
2712 iter->head = iter->head_page->read;
2713 } else {
2714 iter->head_page = cpu_buffer->reader_page;
2715 iter->head = cpu_buffer->reader_page->read;
2716 }
2717 if (iter->head)
2718 iter->read_stamp = cpu_buffer->read_stamp;
2719 else
2720 iter->read_stamp = iter->head_page->page->time_stamp;
2721 iter->cache_reader_page = cpu_buffer->reader_page;
2722 iter->cache_read = cpu_buffer->read;
2723 }
2724
2725 /**
2726 * ring_buffer_iter_reset - reset an iterator
2727 * @iter: The iterator to reset
2728 *
2729 * Resets the iterator, so that it will start from the beginning
2730 * again.
2731 */
2732 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2733 {
2734 struct ring_buffer_per_cpu *cpu_buffer;
2735 unsigned long flags;
2736
2737 if (!iter)
2738 return;
2739
2740 cpu_buffer = iter->cpu_buffer;
2741
2742 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2743 rb_iter_reset(iter);
2744 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2745 }
2746 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2747
2748 /**
2749 * ring_buffer_iter_empty - check if an iterator has no more to read
2750 * @iter: The iterator to check
2751 */
2752 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2753 {
2754 struct ring_buffer_per_cpu *cpu_buffer;
2755
2756 cpu_buffer = iter->cpu_buffer;
2757
2758 return iter->head_page == cpu_buffer->commit_page &&
2759 iter->head == rb_commit_index(cpu_buffer);
2760 }
2761 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2762
2763 static void
2764 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2765 struct ring_buffer_event *event)
2766 {
2767 u64 delta;
2768
2769 switch (event->type_len) {
2770 case RINGBUF_TYPE_PADDING:
2771 return;
2772
2773 case RINGBUF_TYPE_TIME_EXTEND:
2774 delta = event->array[0];
2775 delta <<= TS_SHIFT;
2776 delta += event->time_delta;
2777 cpu_buffer->read_stamp += delta;
2778 return;
2779
2780 case RINGBUF_TYPE_TIME_STAMP:
2781 /* FIXME: not implemented */
2782 return;
2783
2784 case RINGBUF_TYPE_DATA:
2785 cpu_buffer->read_stamp += event->time_delta;
2786 return;
2787
2788 default:
2789 BUG();
2790 }
2791 return;
2792 }
2793
2794 static void
2795 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2796 struct ring_buffer_event *event)
2797 {
2798 u64 delta;
2799
2800 switch (event->type_len) {
2801 case RINGBUF_TYPE_PADDING:
2802 return;
2803
2804 case RINGBUF_TYPE_TIME_EXTEND:
2805 delta = event->array[0];
2806 delta <<= TS_SHIFT;
2807 delta += event->time_delta;
2808 iter->read_stamp += delta;
2809 return;
2810
2811 case RINGBUF_TYPE_TIME_STAMP:
2812 /* FIXME: not implemented */
2813 return;
2814
2815 case RINGBUF_TYPE_DATA:
2816 iter->read_stamp += event->time_delta;
2817 return;
2818
2819 default:
2820 BUG();
2821 }
2822 return;
2823 }
2824
2825 static struct buffer_page *
2826 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2827 {
2828 struct buffer_page *reader = NULL;
2829 unsigned long flags;
2830 int nr_loops = 0;
2831 int ret;
2832
2833 local_irq_save(flags);
2834 arch_spin_lock(&cpu_buffer->lock);
2835
2836 again:
2837 /*
2838 * This should normally only loop twice. But because the
2839 * start of the reader inserts an empty page, it causes
2840 * a case where we will loop three times. There should be no
2841 * reason to loop four times (that I know of).
2842 */
2843 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2844 reader = NULL;
2845 goto out;
2846 }
2847
2848 reader = cpu_buffer->reader_page;
2849
2850 /* If there's more to read, return this page */
2851 if (cpu_buffer->reader_page->read < rb_page_size(reader))
2852 goto out;
2853
2854 /* Never should we have an index greater than the size */
2855 if (RB_WARN_ON(cpu_buffer,
2856 cpu_buffer->reader_page->read > rb_page_size(reader)))
2857 goto out;
2858
2859 /* check if we caught up to the tail */
2860 reader = NULL;
2861 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2862 goto out;
2863
2864 /*
2865 * Reset the reader page to size zero.
2866 */
2867 local_set(&cpu_buffer->reader_page->write, 0);
2868 local_set(&cpu_buffer->reader_page->entries, 0);
2869 local_set(&cpu_buffer->reader_page->page->commit, 0);
2870
2871 spin:
2872 /*
2873 * Splice the empty reader page into the list around the head.
2874 */
2875 reader = rb_set_head_page(cpu_buffer);
2876 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
2877 cpu_buffer->reader_page->list.prev = reader->list.prev;
2878
2879 /*
2880 * cpu_buffer->pages just needs to point to the buffer, it
2881 * has no specific buffer page to point to. Let's move it out
2882 * of our way so we don't accidentally swap it.
2883 */
2884 cpu_buffer->pages = reader->list.prev;
2885
2886 /* The reader page will be pointing to the new head */
2887 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2888
2889 /*
2890 * Here's the tricky part.
2891 *
2892 * We need to move the pointer past the header page.
2893 * But we can only do that if a writer is not currently
2894 * moving it. The page before the header page has the
2895 * flag bit '1' set if it is pointing to the page we want.
2896 * But if the writer is in the process of moving it,
2897 * then it will be '2' or already moved '0'.
2898 */
2899
2900 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2901
2902 /*
2903 * If we did not convert it, then we must try again.
2904 */
2905 if (!ret)
2906 goto spin;
2907
2908 /*
2909 * Yeah! We succeeded in replacing the page.
2910 *
2911 * Now make the new head point back to the reader page.
2912 */
2913 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
2914 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2915
2916 /* Finally update the reader page to the new head */
2917 cpu_buffer->reader_page = reader;
2918 rb_reset_reader_page(cpu_buffer);
2919
2920 goto again;
2921
2922 out:
2923 arch_spin_unlock(&cpu_buffer->lock);
2924 local_irq_restore(flags);
2925
2926 return reader;
2927 }
2928
2929 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2930 {
2931 struct ring_buffer_event *event;
2932 struct buffer_page *reader;
2933 unsigned length;
2934
2935 reader = rb_get_reader_page(cpu_buffer);
2936
2937 /* This function should not be called when buffer is empty */
2938 if (RB_WARN_ON(cpu_buffer, !reader))
2939 return;
2940
2941 event = rb_reader_event(cpu_buffer);
2942
2943 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
2944 cpu_buffer->read++;
2945
2946 rb_update_read_stamp(cpu_buffer, event);
2947
2948 length = rb_event_length(event);
2949 cpu_buffer->reader_page->read += length;
2950 }
2951
2952 static void rb_advance_iter(struct ring_buffer_iter *iter)
2953 {
2954 struct ring_buffer *buffer;
2955 struct ring_buffer_per_cpu *cpu_buffer;
2956 struct ring_buffer_event *event;
2957 unsigned length;
2958
2959 cpu_buffer = iter->cpu_buffer;
2960 buffer = cpu_buffer->buffer;
2961
2962 /*
2963 * Check if we are at the end of the buffer.
2964 */
2965 if (iter->head >= rb_page_size(iter->head_page)) {
2966 /* discarded commits can make the page empty */
2967 if (iter->head_page == cpu_buffer->commit_page)
2968 return;
2969 rb_inc_iter(iter);
2970 return;
2971 }
2972
2973 event = rb_iter_head_event(iter);
2974
2975 length = rb_event_length(event);
2976
2977 /*
2978 * This should not be called to advance the header if we are
2979 * at the tail of the buffer.
2980 */
2981 if (RB_WARN_ON(cpu_buffer,
2982 (iter->head_page == cpu_buffer->commit_page) &&
2983 (iter->head + length > rb_commit_index(cpu_buffer))))
2984 return;
2985
2986 rb_update_iter_read_stamp(iter, event);
2987
2988 iter->head += length;
2989
2990 /* check for end of page padding */
2991 if ((iter->head >= rb_page_size(iter->head_page)) &&
2992 (iter->head_page != cpu_buffer->commit_page))
2993 rb_advance_iter(iter);
2994 }
2995
2996 static struct ring_buffer_event *
2997 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
2998 {
2999 struct ring_buffer_event *event;
3000 struct buffer_page *reader;
3001 int nr_loops = 0;
3002
3003 again:
3004 /*
3005 * We repeat when a timestamp is encountered. It is possible
3006 * to get multiple timestamps from an interrupt entering just
3007 * as one timestamp is about to be written, or from discarded
3008 * commits. The most that we can have is the number on a single page.
3009 */
3010 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3011 return NULL;
3012
3013 reader = rb_get_reader_page(cpu_buffer);
3014 if (!reader)
3015 return NULL;
3016
3017 event = rb_reader_event(cpu_buffer);
3018
3019 switch (event->type_len) {
3020 case RINGBUF_TYPE_PADDING:
3021 if (rb_null_event(event))
3022 RB_WARN_ON(cpu_buffer, 1);
3023 /*
3024 * Because the writer could be discarding every
3025 * event it creates (which would probably be bad)
3026 * if we were to go back to "again" then we may never
3027 * catch up, and will trigger the warn on, or lock
3028 * the box. Return the padding, and we will release
3029 * the current locks, and try again.
3030 */
3031 return event;
3032
3033 case RINGBUF_TYPE_TIME_EXTEND:
3034 /* Internal data, OK to advance */
3035 rb_advance_reader(cpu_buffer);
3036 goto again;
3037
3038 case RINGBUF_TYPE_TIME_STAMP:
3039 /* FIXME: not implemented */
3040 rb_advance_reader(cpu_buffer);
3041 goto again;
3042
3043 case RINGBUF_TYPE_DATA:
3044 if (ts) {
3045 *ts = cpu_buffer->read_stamp + event->time_delta;
3046 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3047 cpu_buffer->cpu, ts);
3048 }
3049 return event;
3050
3051 default:
3052 BUG();
3053 }
3054
3055 return NULL;
3056 }
3057 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3058
3059 static struct ring_buffer_event *
3060 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3061 {
3062 struct ring_buffer *buffer;
3063 struct ring_buffer_per_cpu *cpu_buffer;
3064 struct ring_buffer_event *event;
3065 int nr_loops = 0;
3066
3067 if (ring_buffer_iter_empty(iter))
3068 return NULL;
3069
3070 cpu_buffer = iter->cpu_buffer;
3071 buffer = cpu_buffer->buffer;
3072
3073 /*
3074 * Check if someone performed a consuming read to
3075 * the buffer. A consuming read invalidates the iterator
3076 * and we need to reset the iterator in this case.
3077 */
3078 if (unlikely(iter->cache_read != cpu_buffer->read ||
3079 iter->cache_reader_page != cpu_buffer->reader_page))
3080 rb_iter_reset(iter);
3081
3082 again:
3083 /*
3084 * We repeat when a timestamp is encountered.
3085 * We can get multiple timestamps by nested interrupts or also
3086 * if filtering is on (discarding commits). Since discarding
3087 * commits can be frequent we can get a lot of timestamps.
3088 * But we limit them by not adding timestamps if they begin
3089 * at the start of a page.
3090 */
3091 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3092 return NULL;
3093
3094 if (rb_per_cpu_empty(cpu_buffer))
3095 return NULL;
3096
3097 event = rb_iter_head_event(iter);
3098
3099 switch (event->type_len) {
3100 case RINGBUF_TYPE_PADDING:
3101 if (rb_null_event(event)) {
3102 rb_inc_iter(iter);
3103 goto again;
3104 }
3105 rb_advance_iter(iter);
3106 return event;
3107
3108 case RINGBUF_TYPE_TIME_EXTEND:
3109 /* Internal data, OK to advance */
3110 rb_advance_iter(iter);
3111 goto again;
3112
3113 case RINGBUF_TYPE_TIME_STAMP:
3114 /* FIXME: not implemented */
3115 rb_advance_iter(iter);
3116 goto again;
3117
3118 case RINGBUF_TYPE_DATA:
3119 if (ts) {
3120 *ts = iter->read_stamp + event->time_delta;
3121 ring_buffer_normalize_time_stamp(buffer,
3122 cpu_buffer->cpu, ts);
3123 }
3124 return event;
3125
3126 default:
3127 BUG();
3128 }
3129
3130 return NULL;
3131 }
3132 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3133
3134 static inline int rb_ok_to_lock(void)
3135 {
3136 /*
3137 * If an NMI die dumps out the content of the ring buffer
3138 * do not grab locks. We also permanently disable the ring
3139 * buffer. A one-time deal is all you get from reading
3140 * the ring buffer from an NMI.
3141 */
3142 if (likely(!in_nmi()))
3143 return 1;
3144
3145 tracing_off_permanent();
3146 return 0;
3147 }
3148
3149 /**
3150 * ring_buffer_peek - peek at the next event to be read
3151 * @buffer: The ring buffer to read
3152 * @cpu: The cpu to peek at
3153 * @ts: The timestamp counter of this event.
3154 *
3155 * This will return the event that will be read next, but does
3156 * not consume the data.
3157 */
3158 struct ring_buffer_event *
3159 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
3160 {
3161 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3162 struct ring_buffer_event *event;
3163 unsigned long flags;
3164 int dolock;
3165
3166 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3167 return NULL;
3168
3169 dolock = rb_ok_to_lock();
3170 again:
3171 local_irq_save(flags);
3172 if (dolock)
3173 spin_lock(&cpu_buffer->reader_lock);
3174 event = rb_buffer_peek(cpu_buffer, ts);
3175 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3176 rb_advance_reader(cpu_buffer);
3177 if (dolock)
3178 spin_unlock(&cpu_buffer->reader_lock);
3179 local_irq_restore(flags);
3180
3181 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3182 goto again;
3183
3184 return event;
3185 }
3186
3187 /**
3188 * ring_buffer_iter_peek - peek at the next event to be read
3189 * @iter: The ring buffer iterator
3190 * @ts: The timestamp counter of this event.
3191 *
3192 * This will return the event that will be read next, but does
3193 * not increment the iterator.
3194 */
3195 struct ring_buffer_event *
3196 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3197 {
3198 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3199 struct ring_buffer_event *event;
3200 unsigned long flags;
3201
3202 again:
3203 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3204 event = rb_iter_peek(iter, ts);
3205 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3206
3207 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3208 goto again;
3209
3210 return event;
3211 }
3212
3213 /**
3214 * ring_buffer_consume - return an event and consume it
3215 * @buffer: The ring buffer to get the next event from
3216 *
3217 * Returns the next event in the ring buffer, and that event is consumed.
3218 * Meaning, that sequential reads will keep returning a different event,
3219 * and eventually empty the ring buffer if the producer is slower.
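*
* A draining-loop sketch (process() is a hypothetical handler; @ts may be
* NULL if the timestamp is not needed):
*
* 	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
* 		process(ring_buffer_event_data(event));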
3220 */
3221 struct ring_buffer_event *
3222 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
3223 {
3224 struct ring_buffer_per_cpu *cpu_buffer;
3225 struct ring_buffer_event *event = NULL;
3226 unsigned long flags;
3227 int dolock;
3228
3229 dolock = rb_ok_to_lock();
3230
3231 again:
3232 /* might be called in atomic */
3233 preempt_disable();
3234
3235 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3236 goto out;
3237
3238 cpu_buffer = buffer->buffers[cpu];
3239 local_irq_save(flags);
3240 if (dolock)
3241 spin_lock(&cpu_buffer->reader_lock);
3242
3243 event = rb_buffer_peek(cpu_buffer, ts);
3244 if (event)
3245 rb_advance_reader(cpu_buffer);
3246
3247 if (dolock)
3248 spin_unlock(&cpu_buffer->reader_lock);
3249 local_irq_restore(flags);
3250
3251 out:
3252 preempt_enable();
3253
3254 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3255 goto again;
3256
3257 return event;
3258 }
3259 EXPORT_SYMBOL_GPL(ring_buffer_consume);
3260
3261 /**
3262 * ring_buffer_read_start - start a non consuming read of the buffer
3263 * @buffer: The ring buffer to read from
3264 * @cpu: The cpu buffer to iterate over
3265 *
3266 * This starts up an iteration through the buffer. It also disables
3267 * the recording to the buffer until the reading is finished.
3268 * This prevents the reading from being corrupted. This is not
3269 * a consuming read, so a producer is not expected.
3270 *
3271 * Must be paired with ring_buffer_read_finish.
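*
* A non-consuming iteration sketch:
*
* 	iter = ring_buffer_read_start(buffer, cpu);
* 	if (!iter)
* 		return;
* 	while ((event = ring_buffer_read(iter, &ts)))
* 		... inspect ring_buffer_event_data(event) ...
* 	ring_buffer_read_finish(iter);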
3272 */
3273 struct ring_buffer_iter *
3274 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3275 {
3276 struct ring_buffer_per_cpu *cpu_buffer;
3277 struct ring_buffer_iter *iter;
3278 unsigned long flags;
3279
3280 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3281 return NULL;
3282
3283 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3284 if (!iter)
3285 return NULL;
3286
3287 cpu_buffer = buffer->buffers[cpu];
3288
3289 iter->cpu_buffer = cpu_buffer;
3290
3291 atomic_inc(&cpu_buffer->record_disabled);
3292 synchronize_sched();
3293
3294 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3295 arch_spin_lock(&cpu_buffer->lock);
3296 rb_iter_reset(iter);
3297 arch_spin_unlock(&cpu_buffer->lock);
3298 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3299
3300 return iter;
3301 }
3302 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3303
3304 /**
3305 * ring_buffer_read_finish - finish reading the iterator of the buffer
3306 * @iter: The iterator retrieved by ring_buffer_read_start
3307 *
3308 * This re-enables the recording to the buffer, and frees the
3309 * iterator.
3310 */
3311 void
3312 ring_buffer_read_finish(struct ring_buffer_iter *iter)
3313 {
3314 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3315
3316 atomic_dec(&cpu_buffer->record_disabled);
3317 kfree(iter);
3318 }
3319 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3320
3321 /**
3322 * ring_buffer_read - read the next item in the ring buffer by the iterator
3323 * @iter: The ring buffer iterator
3324 * @ts: The time stamp of the event read.
3325 *
3326 * This reads the next event in the ring buffer and increments the iterator.
3327 */
3328 struct ring_buffer_event *
3329 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3330 {
3331 struct ring_buffer_event *event;
3332 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3333 unsigned long flags;
3334
3335 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3336 again:
3337 event = rb_iter_peek(iter, ts);
3338 if (!event)
3339 goto out;
3340
3341 if (event->type_len == RINGBUF_TYPE_PADDING)
3342 goto again;
3343
3344 rb_advance_iter(iter);
3345 out:
3346 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3347
3348 return event;
3349 }
3350 EXPORT_SYMBOL_GPL(ring_buffer_read);
3351
3352 /**
3353 * ring_buffer_size - return the size of the ring buffer (in bytes)
3354 * @buffer: The ring buffer.
3355 */
3356 unsigned long ring_buffer_size(struct ring_buffer *buffer)
3357 {
3358 return BUF_PAGE_SIZE * buffer->pages;
3359 }
3360 EXPORT_SYMBOL_GPL(ring_buffer_size);
3361
3362 static void
3363 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3364 {
3365 rb_head_page_deactivate(cpu_buffer);
3366
3367 cpu_buffer->head_page
3368 = list_entry(cpu_buffer->pages, struct buffer_page, list);
3369 local_set(&cpu_buffer->head_page->write, 0);
3370 local_set(&cpu_buffer->head_page->entries, 0);
3371 local_set(&cpu_buffer->head_page->page->commit, 0);
3372
3373 cpu_buffer->head_page->read = 0;
3374
3375 cpu_buffer->tail_page = cpu_buffer->head_page;
3376 cpu_buffer->commit_page = cpu_buffer->head_page;
3377
3378 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3379 local_set(&cpu_buffer->reader_page->write, 0);
3380 local_set(&cpu_buffer->reader_page->entries, 0);
3381 local_set(&cpu_buffer->reader_page->page->commit, 0);
3382 cpu_buffer->reader_page->read = 0;
3383
3384 local_set(&cpu_buffer->commit_overrun, 0);
3385 local_set(&cpu_buffer->overrun, 0);
3386 local_set(&cpu_buffer->entries, 0);
3387 local_set(&cpu_buffer->committing, 0);
3388 local_set(&cpu_buffer->commits, 0);
3389 cpu_buffer->read = 0;
3390
3391 cpu_buffer->write_stamp = 0;
3392 cpu_buffer->read_stamp = 0;
3393
3394 rb_head_page_activate(cpu_buffer);
3395 }
3396
3397 /**
3398 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3399 * @buffer: The ring buffer to reset a per cpu buffer of
3400 * @cpu: The CPU buffer to be reset
3401 */
3402 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3403 {
3404 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3405 unsigned long flags;
3406
3407 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3408 return;
3409
3410 atomic_inc(&cpu_buffer->record_disabled);
3411
3412 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3413
3414 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3415 goto out;
3416
3417 arch_spin_lock(&cpu_buffer->lock);
3418
3419 rb_reset_cpu(cpu_buffer);
3420
3421 arch_spin_unlock(&cpu_buffer->lock);
3422
3423 out:
3424 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3425
3426 atomic_dec(&cpu_buffer->record_disabled);
3427 }
3428 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3429
3430 /**
3431 * ring_buffer_reset - reset a ring buffer
3432 * @buffer: The ring buffer to reset all cpu buffers
3433 */
3434 void ring_buffer_reset(struct ring_buffer *buffer)
3435 {
3436 int cpu;
3437
3438 for_each_buffer_cpu(buffer, cpu)
3439 ring_buffer_reset_cpu(buffer, cpu);
3440 }
3441 EXPORT_SYMBOL_GPL(ring_buffer_reset);
3442
3443 /**
3444 * ring_buffer_empty - is the ring buffer empty?
3445 * @buffer: The ring buffer to test
3446 */
3447 int ring_buffer_empty(struct ring_buffer *buffer)
3448 {
3449 struct ring_buffer_per_cpu *cpu_buffer;
3450 unsigned long flags;
3451 int dolock;
3452 int cpu;
3453 int ret;
3454
3455 dolock = rb_ok_to_lock();
3456
3457 /* yes this is racy, but if you don't like the race, lock the buffer */
3458 for_each_buffer_cpu(buffer, cpu) {
3459 cpu_buffer = buffer->buffers[cpu];
3460 local_irq_save(flags);
3461 if (dolock)
3462 spin_lock(&cpu_buffer->reader_lock);
3463 ret = rb_per_cpu_empty(cpu_buffer);
3464 if (dolock)
3465 spin_unlock(&cpu_buffer->reader_lock);
3466 local_irq_restore(flags);
3467
3468 if (!ret)
3469 return 0;
3470 }
3471
3472 return 1;
3473 }
3474 EXPORT_SYMBOL_GPL(ring_buffer_empty);
3475
3476 /**
3477 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
3478 * @buffer: The ring buffer
3479 * @cpu: The CPU buffer to test
3480 */
3481 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3482 {
3483 struct ring_buffer_per_cpu *cpu_buffer;
3484 unsigned long flags;
3485 int dolock;
3486 int ret;
3487
3488 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3489 return 1;
3490
3491 dolock = rb_ok_to_lock();
3492
3493 cpu_buffer = buffer->buffers[cpu];
3494 local_irq_save(flags);
3495 if (dolock)
3496 spin_lock(&cpu_buffer->reader_lock);
3497 ret = rb_per_cpu_empty(cpu_buffer);
3498 if (dolock)
3499 spin_unlock(&cpu_buffer->reader_lock);
3500 local_irq_restore(flags);
3501
3502 return ret;
3503 }
3504 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
3505
3506 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3507 /**
3508 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
3509 * @buffer_a: One buffer to swap with
3510 * @buffer_b: The other buffer to swap with
3511 *
3512 * This function is useful for tracers that want to take a "snapshot"
3513 * of a CPU buffer and have another backup buffer lying around.
3514 * It is expected that the tracer handles the cpu buffer not being
3515 * used at the moment.
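*
* A snapshot sketch (max_buffer is a hypothetical spare ring buffer of the
* same size as buffer):
*
* 	if (ring_buffer_swap_cpu(max_buffer, buffer, cpu) == 0)
* 		... the live cpu buffer now lives in max_buffer ...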
3516 */
3517 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3518 struct ring_buffer *buffer_b, int cpu)
3519 {
3520 struct ring_buffer_per_cpu *cpu_buffer_a;
3521 struct ring_buffer_per_cpu *cpu_buffer_b;
3522 int ret = -EINVAL;
3523
3524 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3525 !cpumask_test_cpu(cpu, buffer_b->cpumask))
3526 goto out;
3527
3528 /* At least make sure the two buffers are somewhat the same */
3529 if (buffer_a->pages != buffer_b->pages)
3530 goto out;
3531
3532 ret = -EAGAIN;
3533
3534 if (ring_buffer_flags != RB_BUFFERS_ON)
3535 goto out;
3536
3537 if (atomic_read(&buffer_a->record_disabled))
3538 goto out;
3539
3540 if (atomic_read(&buffer_b->record_disabled))
3541 goto out;
3542
3543 cpu_buffer_a = buffer_a->buffers[cpu];
3544 cpu_buffer_b = buffer_b->buffers[cpu];
3545
3546 if (atomic_read(&cpu_buffer_a->record_disabled))
3547 goto out;
3548
3549 if (atomic_read(&cpu_buffer_b->record_disabled))
3550 goto out;
3551
3552 /*
3553 * We can't do a synchronize_sched here because this
3554 * function can be called in atomic context.
3555 * Normally this will be called from the same CPU as cpu.
3556 * If not it's up to the caller to protect this.
3557 */
3558 atomic_inc(&cpu_buffer_a->record_disabled);
3559 atomic_inc(&cpu_buffer_b->record_disabled);
3560
3561 ret = -EBUSY;
3562 if (local_read(&cpu_buffer_a->committing))
3563 goto out_dec;
3564 if (local_read(&cpu_buffer_b->committing))
3565 goto out_dec;
3566
3567 buffer_a->buffers[cpu] = cpu_buffer_b;
3568 buffer_b->buffers[cpu] = cpu_buffer_a;
3569
3570 cpu_buffer_b->buffer = buffer_a;
3571 cpu_buffer_a->buffer = buffer_b;
3572
3573 ret = 0;
3574
3575 out_dec:
3576 atomic_dec(&cpu_buffer_a->record_disabled);
3577 atomic_dec(&cpu_buffer_b->record_disabled);
3578 out:
3579 return ret;
3580 }
3581 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
3582 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
3583
3584 /**
3585 * ring_buffer_alloc_read_page - allocate a page to read from buffer
3586 * @buffer: the buffer to allocate for.
3587 *
3588 * This function is used in conjunction with ring_buffer_read_page.
3589 * When reading a full page from the ring buffer, these functions
3590 * can be used to speed up the process. The calling function should
3591 * allocate a few pages first with this function. Then when it
3592 * needs to get pages from the ring buffer, it passes the result
3593 * of this function into ring_buffer_read_page, which will swap
3594 * the page that was allocated, with the read page of the buffer.
3595 *
3596 * Returns:
3597 * The page allocated, or NULL on error.
3598 */
3599 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
3600 {
3601 struct buffer_data_page *bpage;
3602 unsigned long addr;
3603
3604 addr = __get_free_page(GFP_KERNEL);
3605 if (!addr)
3606 return NULL;
3607
3608 bpage = (void *)addr;
3609
3610 rb_init_page(bpage);
3611
3612 return bpage;
3613 }
3614 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
3615
3616 /**
3617 * ring_buffer_free_read_page - free an allocated read page
3618 * @buffer: the buffer the page was allocated for
3619 * @data: the page to free
3620 *
3621 * Free a page allocated from ring_buffer_alloc_read_page.
3622 */
3623 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3624 {
3625 free_page((unsigned long)data);
3626 }
3627 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
3628
3629 /**
3630 * ring_buffer_read_page - extract a page from the ring buffer
3631 * @buffer: buffer to extract from
3632 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
3633 * @len: amount to extract
3634 * @cpu: the cpu of the buffer to extract
3635 * @full: should the extraction only happen when the page is full.
3636 *
3637 * This function will pull out a page from the ring buffer and consume it.
3638 * @data_page must be the address of the variable that was returned
3639 * from ring_buffer_alloc_read_page. This is because the page might be used
3640 * to swap with a page in the ring buffer.
3641 *
3642 * for example:
3643 * rpage = ring_buffer_alloc_read_page(buffer);
3644 * if (!rpage)
3645 * return error;
3646 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
3647 * if (ret >= 0)
3648 * process_page(rpage, ret);
3649 *
3650 * When @full is set, the function will not succeed unless
3651 * the writer is off the reader page.
3652 *
3653 * Note: it is up to the calling functions to handle sleeps and wakeups.
3654 * The ring buffer can be used anywhere in the kernel and can not
3655 * blindly call wake_up. The layer that uses the ring buffer must be
3656 * responsible for that.
3657 *
3658 * Returns:
3659 * >=0 if data has been transferred, returns the offset of consumed data.
3660 * <0 if no data has been transferred.
3661 */
3662 int ring_buffer_read_page(struct ring_buffer *buffer,
3663 void **data_page, size_t len, int cpu, int full)
3664 {
3665 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3666 struct ring_buffer_event *event;
3667 struct buffer_data_page *bpage;
3668 struct buffer_page *reader;
3669 unsigned long flags;
3670 unsigned int commit;
3671 unsigned int read;
3672 u64 save_timestamp;
3673 int ret = -1;
3674
3675 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3676 goto out;
3677
3678 /*
3679 * If len is not big enough to hold the page header, then
3680 * we can not copy anything.
3681 */
3682 if (len <= BUF_PAGE_HDR_SIZE)
3683 goto out;
3684
3685 len -= BUF_PAGE_HDR_SIZE;
3686
3687 if (!data_page)
3688 goto out;
3689
3690 bpage = *data_page;
3691 if (!bpage)
3692 goto out;
3693
3694 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3695
3696 reader = rb_get_reader_page(cpu_buffer);
3697 if (!reader)
3698 goto out_unlock;
3699
3700 event = rb_reader_event(cpu_buffer);
3701
3702 read = reader->read;
3703 commit = rb_page_commit(reader);
3704
3705 /*
3706 * If this page has been partially read or
3707 * if len is not big enough to read the rest of the page or
3708 * a writer is still on the page, then
3709 * we must copy the data from the page to the buffer.
3710 * Otherwise, we can simply swap the page with the one passed in.
3711 */
3712 if (read || (len < (commit - read)) ||
3713 cpu_buffer->reader_page == cpu_buffer->commit_page) {
3714 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3715 unsigned int rpos = read;
3716 unsigned int pos = 0;
3717 unsigned int size;
3718
3719 if (full)
3720 goto out_unlock;
3721
3722 if (len > (commit - read))
3723 len = (commit - read);
3724
3725 size = rb_event_length(event);
3726
3727 if (len < size)
3728 goto out_unlock;
3729
3730 /* save the current timestamp, since the user will need it */
3731 save_timestamp = cpu_buffer->read_stamp;
3732
3733 /* Need to copy one event at a time */
3734 do {
3735 memcpy(bpage->data + pos, rpage->data + rpos, size);
3736
3737 len -= size;
3738
3739 rb_advance_reader(cpu_buffer);
3740 rpos = reader->read;
3741 pos += size;
3742
3743 event = rb_reader_event(cpu_buffer);
3744 size = rb_event_length(event);
3745 } while (len > size);
3746
3747 /* update bpage */
3748 local_set(&bpage->commit, pos);
3749 bpage->time_stamp = save_timestamp;
3750
3751 /* we copied everything to the beginning */
3752 read = 0;
3753 } else {
3754 /* update the entry counter */
3755 cpu_buffer->read += rb_page_entries(reader);
3756
3757 /* swap the pages */
3758 rb_init_page(bpage);
3759 bpage = reader->page;
3760 reader->page = *data_page;
3761 local_set(&reader->write, 0);
3762 local_set(&reader->entries, 0);
3763 reader->read = 0;
3764 *data_page = bpage;
3765 }
3766 ret = read;
3767
3768 out_unlock:
3769 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3770
3771 out:
3772 return ret;
3773 }
3774 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
3775
3776 #ifdef CONFIG_TRACING
3777 static ssize_t
3778 rb_simple_read(struct file *filp, char __user *ubuf,
3779 size_t cnt, loff_t *ppos)
3780 {
3781 unsigned long *p = filp->private_data;
3782 char buf[64];
3783 int r;
3784
3785 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3786 r = sprintf(buf, "permanently disabled\n");
3787 else
3788 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3789
3790 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3791 }
3792
3793 static ssize_t
3794 rb_simple_write(struct file *filp, const char __user *ubuf,
3795 size_t cnt, loff_t *ppos)
3796 {
3797 unsigned long *p = filp->private_data;
3798 char buf[64];
3799 unsigned long val;
3800 int ret;
3801
3802 if (cnt >= sizeof(buf))
3803 return -EINVAL;
3804
3805 if (copy_from_user(&buf, ubuf, cnt))
3806 return -EFAULT;
3807
3808 buf[cnt] = 0;
3809
3810 ret = strict_strtoul(buf, 10, &val);
3811 if (ret < 0)
3812 return ret;
3813
3814 if (val)
3815 set_bit(RB_BUFFERS_ON_BIT, p);
3816 else
3817 clear_bit(RB_BUFFERS_ON_BIT, p);
3818
3819 (*ppos)++;
3820
3821 return cnt;
3822 }
3823
3824 static const struct file_operations rb_simple_fops = {
3825 .open = tracing_open_generic,
3826 .read = rb_simple_read,
3827 .write = rb_simple_write,
3828 };
3829
3830
3831 static __init int rb_init_debugfs(void)
3832 {
3833 struct dentry *d_tracer;
3834
3835 d_tracer = tracing_init_dentry();
3836
3837 trace_create_file("tracing_on", 0644, d_tracer,
3838 &ring_buffer_flags, &rb_simple_fops);
3839
3840 return 0;
3841 }
3842
3843 fs_initcall(rb_init_debugfs);
3844 #endif
3845
3846 #ifdef CONFIG_HOTPLUG_CPU
3847 static int rb_cpu_notify(struct notifier_block *self,
3848 unsigned long action, void *hcpu)
3849 {
3850 struct ring_buffer *buffer =
3851 container_of(self, struct ring_buffer, cpu_notify);
3852 long cpu = (long)hcpu;
3853
3854 switch (action) {
3855 case CPU_UP_PREPARE:
3856 case CPU_UP_PREPARE_FROZEN:
3857 if (cpumask_test_cpu(cpu, buffer->cpumask))
3858 return NOTIFY_OK;
3859
3860 buffer->buffers[cpu] =
3861 rb_allocate_cpu_buffer(buffer, cpu);
3862 if (!buffer->buffers[cpu]) {
3863 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3864 cpu);
3865 return NOTIFY_OK;
3866 }
3867 smp_wmb();
3868 cpumask_set_cpu(cpu, buffer->cpumask);
3869 break;
3870 case CPU_DOWN_PREPARE:
3871 case CPU_DOWN_PREPARE_FROZEN:
3872 /*
3873 * Do nothing.
3874 * If we were to free the buffer, then the user would
3875 * lose any trace that was in the buffer.
3876 */
3877 break;
3878 default:
3879 break;
3880 }
3881 return NOTIFY_OK;
3882 }
3883 #endif