include/linux/ring_buffer.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

struct ring_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
        kmemcheck_bitfield_begin(bitfield);
        u32             type_len:5, time_delta:27;
        kmemcheck_bitfield_end(bitfield);

        u32             array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:       Left over page padding or discarded event
 *                               If time_delta is 0:
 *                                array is ignored
 *                                size is variable depending on how much
 *                                padding is needed
 *                               If time_delta is non zero:
 *                                array[0] holds the actual length
 *                                size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:   Extend the time delta
 *                               array[0] = time delta (28 .. 59)
 *                               size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:    Sync time stamp with external clock
 *                               array[0]    = tv_nsec
 *                               array[1..2] = tv_sec
 *                               size = 16 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *                              Data record
 *                               If type_len is zero:
 *                                array[0] holds the actual length
 *                                array[1..(length+3)/4] holds data
 *                                size = 4 + length (bytes)
 *                               else
 *                                length = type_len << 2
 *                                array[0..(length+3)/4-1] holds data
 *                                size = 4 + length (bytes)
 */
enum ring_buffer_type {
        RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
        RINGBUF_TYPE_PADDING,
        RINGBUF_TYPE_TIME_EXTEND,
        /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
        RINGBUF_TYPE_TIME_STAMP,
};

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
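
/*
 * Example (illustrative sketch; "process_payload" is a hypothetical helper,
 * not part of this API).  Given an event returned by one of the read
 * functions below, use these accessors instead of poking at the struct:
 *
 *      void *data = ring_buffer_event_data(event);
 *      unsigned len = ring_buffer_event_length(event);
 *
 *      process_payload(data, len);
 */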

/*
 * ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
 * must not be called on the discarded event. This function
 * will try to remove the event from the ring buffer completely
 * if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer, event);
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event);

/*
 * size is in bytes for each per CPU buffer.
 */
struct ring_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)                  \
({                                                      \
        static struct lock_class_key __key;            \
        __ring_buffer_alloc((size), (flags), &__key);   \
})
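
/*
 * Example (illustrative sketch; the size shown is arbitrary).  A typical
 * user allocates one buffer covering all CPUs, optionally in overwrite
 * mode, and releases it with ring_buffer_free() when done:
 *
 *      struct ring_buffer *buffer;
 *
 *      buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *      if (!buffer)
 *              return -ENOMEM;
 *      ...
 *      ring_buffer_free(buffer);
 */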

int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table);


#define RING_BUFFER_ALL_CPUS -1
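
/*
 * Example (illustrative sketch; error handling is simplified).  A blocking
 * reader can sleep until data is available on any CPU by passing
 * RING_BUFFER_ALL_CPUS:
 *
 *      int ret;
 *
 *      ret = ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, false);
 *      if (ret)
 *              return ret;
 *      (data should now be available; consume it with the readers below)
 */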

void ring_buffer_free(struct ring_buffer *buffer);

int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
                                                   unsigned long length);
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
                              struct ring_buffer_event *event);
int ring_buffer_write(struct ring_buffer *buffer,
                      unsigned long length, void *data);
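
/*
 * Example (illustrative sketch; "struct my_entry" is a hypothetical payload
 * and error handling is minimal).  The usual write path is to reserve space,
 * fill the event in place, then commit it; ring_buffer_write() is a one-shot
 * shortcut that copies an already prepared buffer instead:
 *
 *      struct ring_buffer_event *event;
 *      struct my_entry *entry;
 *
 *      event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *      if (!event)
 *              return -EBUSY;
 *
 *      entry = ring_buffer_event_data(event);
 *      entry->value = 42;
 *
 *      ring_buffer_unlock_commit(buffer, event);
 */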

struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
                 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
                    unsigned long *lost_events);
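
/*
 * Example (illustrative sketch; "handle_event" is a hypothetical callback).
 * A consuming reader drains one CPU's buffer, picking up each event's
 * timestamp and the count of events lost to overwrites along the way:
 *
 *      struct ring_buffer_event *event;
 *      unsigned long lost_events;
 *      u64 ts;
 *
 *      while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events)))
 *              handle_event(ring_buffer_event_data(event),
 *                           ring_buffer_event_length(event), ts, lost_events);
 */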

struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
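
/*
 * Example (illustrative sketch; "inspect_event" is a hypothetical helper and
 * NULL checks are omitted).  Unlike ring_buffer_consume(), the iterator
 * interface walks events without consuming them: prepare and start an
 * iterator, read until it is exhausted, then finish to release it:
 *
 *      struct ring_buffer_iter *iter;
 *      struct ring_buffer_event *event;
 *      u64 ts;
 *
 *      iter = ring_buffer_read_prepare(buffer, cpu);
 *      ring_buffer_read_prepare_sync();
 *      ring_buffer_read_start(iter);
 *
 *      while ((event = ring_buffer_read(iter, &ts)))
 *              inspect_event(event, ts);
 *
 *      ring_buffer_read_finish(iter);
 */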

unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);

void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_reset(struct ring_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                         struct ring_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                     struct ring_buffer *buffer_b, int cpu)
{
        return -ENODEV;
}
#endif

bool ring_buffer_empty(struct ring_buffer *buffer);
bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
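
/*
 * Example (illustrative sketch).  Recording can be paused around an
 * operation such as a reset so writers do not race with it:
 *
 *      ring_buffer_record_disable(buffer);
 *      ring_buffer_reset(buffer);
 *      ring_buffer_record_enable(buffer);
 */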

u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
                                      int cpu, u64 *ts);
void ring_buffer_set_clock(struct ring_buffer *buffer,
                           u64 (*clock)(void));

size_t ring_buffer_page_len(void *page);


void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
                          size_t len, int cpu, int full);
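
/*
 * Example (illustrative sketch; "consume_page" is a hypothetical consumer and
 * return-value handling is simplified).  ring_buffer_read_page() swaps a full
 * page of events out of the buffer and into a page obtained from
 * ring_buffer_alloc_read_page(), which lets splice-style readers move whole
 * pages instead of copying individual events:
 *
 *      void *page;
 *      int ret;
 *
 *      page = ring_buffer_alloc_read_page(buffer, cpu);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *
 *      ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *      if (ret >= 0)
 *              consume_page(page, PAGE_SIZE);
 *
 *      ring_buffer_free_read_page(buffer, cpu, page);
 */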

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

enum ring_buffer_flags {
        RB_FL_OVERWRITE         = 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare    NULL
#endif

#endif /* _LINUX_RING_BUFFER_H */