/******************************************************************************
 *
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
 *
 ******************************************************************************/
/* Uses */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <scsc/scsc_logring.h>

/* Implements */
#include "scsc_wifilogger_core.h"

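/* Monotonically increasing id assigned to each newly created ring */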
static atomic_t next_ring_id;

static void wlog_drain_worker(struct work_struct *work)
{
        struct scsc_wlog_ring *r;

        r = container_of(work, struct scsc_wlog_ring, drain_work);

        if (r && r->ops.drain_ring)
                r->ops.drain_ring(r, r->flushing ? r->st.rb_byte_size : DEFAULT_DRAIN_CHUNK_SZ(r));
}

static void drain_timer_callback(unsigned long data)
{
        struct scsc_wlog_ring *r = (struct scsc_wlog_ring *)data;

        SCSC_TAG_DBG4(WLOG, "TIMER DRAIN : %p\n", r);
        /* Timer callbacks run in atomic context: kick the workqueue, no sleeping here */
        queue_work(r->drain_workq, &r->drain_work);

        if (r->st.verbose_level && r->max_interval_sec) {
                mod_timer(&r->drain_timer,
                          jiffies + msecs_to_jiffies(r->max_interval_sec * 1000));
                SCSC_TAG_DBG4(WLOG, "TIMER RELOADED !!!\n");
        }
}

static int wlog_ring_init(struct scsc_wlog_ring *r)
{
        /* Allocate buffer and spare area */
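        /*
         * Note: the MAX_RECORD_SZ spare tail lets a record that begins near
         * the end of the ring be written contiguously; wlog_write_record()
         * copies any overflow back to the head of the buffer on rollover.
         */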
        r->buf = kzalloc(r->st.rb_byte_size + MAX_RECORD_SZ, GFP_KERNEL);
        if (!r->buf)
                return -ENOMEM;
        r->drain_sz = DRAIN_BUF_SZ;
        r->drain_buf = kzalloc(r->drain_sz, GFP_KERNEL);
        if (!r->drain_buf) {
                kfree(r->buf);
                return -ENOMEM;
        }
        mutex_init(&r->drain_lock);

        r->drain_workq = create_workqueue("wifilogger");
        if (!r->drain_workq) {
                kfree(r->drain_buf);
                kfree(r->buf);
                return -ENOMEM;
        }
        INIT_WORK(&r->drain_work, wlog_drain_worker);
        setup_timer(&r->drain_timer, drain_timer_callback, (unsigned long)r);

        r->st.ring_id = atomic_read(&next_ring_id);
        atomic_inc(&next_ring_id);

        SCSC_TAG_DBG3(WLOG, "Workers initialized for ring[%p]: %s\n",
                      r, r->st.name);

        return 0;
}

static void wlog_ring_finalize(struct scsc_wlog_ring *r)
{
        if (!r)
                return;

        cancel_work_sync(&r->drain_work);
        del_timer_sync(&r->drain_timer);
        destroy_workqueue(r->drain_workq);

        r->initialized = false;
        kfree(r->drain_buf);
        kfree(r->buf);
        r->buf = NULL;
}

static wifi_error wlog_get_ring_status(struct scsc_wlog_ring *r,
                                       struct scsc_wifi_ring_buffer_status *status)
{
        if (!r || !status)
                return WIFI_ERROR_INVALID_ARGS;
        /* TODO locking SRCU ? */
        *status = r->st;

        return WIFI_SUCCESS;
}

static int wlog_read_records(struct scsc_wlog_ring *r, u8 *buf,
                             size_t blen, u32 *records,
                             struct scsc_wifi_ring_buffer_status *status)
{
        u16 read_bytes = 0, rec_sz = 0;
        u32 got_records = 0, req_records = U32_MAX;

        if (scsc_wlog_ring_is_flushing(r))
                return 0;

        /**
         * req_records defaults to the maximum u32 value on purpose...
         * if a max number of records is provided in @records, update
         * req_records accordingly.
         */
        if (records)
                req_records = *records;
        /**
         * We have ONLY ONE READER at any time that consumes data, impersonated
         * here by the drain_ring drainer callback, whose read-ops are made
         * atomic by the drain_lock mutex: this guards against races between
         * the periodic-drain worker and the threshold-drain procedure
         * triggered by the write itself.
         *
         * But we also want to guard against any direct read_records
         * invocation, as happens in test rings via debugfs, so we add a read
         * spinlock: in a real scenario it is almost never contended, and for
         * the same reason we do not need the irqsave variant either... so
         * latency is not impacted.
         */
        raw_spin_lock(&r->rlock);
        while (!scsc_wlog_is_ring_empty(r) && got_records < req_records) {
                rec_sz = REC_SZ(r, RPOS(r));
                if (read_bytes + rec_sz > blen)
                        break;
                /**
                 * Rollover is transparent on read... the last written
                 * material in the spare area is still there...
                 */
                memcpy(buf + read_bytes, REC_START(r, RPOS(r)), rec_sz);
                read_bytes += rec_sz;
                r->st.read_bytes += rec_sz;
                got_records++;
        }
        if (status)
                *status = r->st;
        raw_spin_unlock(&r->rlock);

        if (records)
                *records = got_records;
        SCSC_TAG_DBG4(WLOG, "BytesRead:%u -- RecordsRead:%u\n",
                      read_bytes, got_records);

        return read_bytes;
}

static int wlog_default_ring_drainer(struct scsc_wlog_ring *r, size_t drain_sz)
{
        int rval = 0, drained_bytes = 0;
        size_t chunk_sz = drain_sz <= r->drain_sz ? drain_sz : r->drain_sz;
        struct scsc_wifi_ring_buffer_status ring_status = {};

        /* An SRCU on the callback would be better here */
        mutex_lock(&r->drain_lock);
        do {
                /* drain ... consumes data */
                rval = r->ops.read_records(r, r->drain_buf, chunk_sz, NULL, &ring_status);
                /* and push... if any callback is defined */
                if (!r->flushing) {
                        mutex_lock(&r->wl->lock);
                        if (rval > 0 && r->wl->on_ring_buffer_data_cb) {
                                SCSC_TAG_DEBUG(WLOG,
                                               "Invoking registered log_handler:%p to drain %d bytes\n",
                                               r->wl->on_ring_buffer_data_cb, rval);
                                r->wl->on_ring_buffer_data_cb(r->st.name, r->drain_buf, rval,
                                                              &ring_status, r->wl->on_ring_buffer_ctx);
                                SCSC_TAG_DBG4(WLOG, "Callback processed %d bytes\n", rval);
                        }
                        mutex_unlock(&r->wl->lock);
                }
                drained_bytes += rval;
        } while (rval && drained_bytes <= drain_sz);
        SCSC_TAG_DBG3(WLOG, "%s %d bytes\n", (r->flushing) ? "Flushed" : "Drained",
                      drained_bytes);

        /* Execute flush if required... */
        if (r->flushing) {
                unsigned long flags;

                /* Inhibit writers momentarily */
                raw_spin_lock_irqsave(&r->wlock, flags);
                r->dropped = 0;
                r->st.written_records = 0;
                r->st.read_bytes = r->st.written_bytes = 0;
                r->flushing = false;
                raw_spin_unlock_irqrestore(&r->wlock, flags);
                SCSC_TAG_INFO(WLOG, "Ring '%s' flushed.\n", r->st.name);
        }
        mutex_unlock(&r->drain_lock);

        return drained_bytes;
}

/**
 * A generic write that builds the final payload by concatenating:
 *  - the common record-header
 *  - an optionally provided ring_hdr
 *  - the provided payload buf
 *
 * The optional header is passed down as a separate parameter to avoid
 * unnecessary intermediate copies: this function copies all the bits
 * in place, directly at the proper calculated ring position.
 *
 * By design a read-end-point is always provided by the framework
 * (in terms of netlink channels towards the WiFi-HAL), so we spawn a
 * configurable reader-worker upon start of logging, and the same reader
 * is also invoked when the ring is running out of space: for these reasons
 * the ring is meant NEVER to overwrite itself.
 *
 * If NO periodic reader is spawned NOR a min_data_size threshold was
 * specified to force-kick the periodic drainer, we could just end up
 * filling up the ring: in that case we drop and account for it.
 *
 * Data is drained and pushed upstream periodically using the
 * on_ring_buffer_data_cb, if one was provided and periodic drain was
 * configured.
 *
 * @r: the referenced ring
 * @buf: payload
 * @blen: payload_sz
 * @ring_hdr: upper-layer-record-header
 * @hlen: upper-layer-record-header length
 * @verbose_level: loglevel for this message (to be checked against)
 * @timestamp: a provided timestamp (if any). If zero, a timestamp will be
 * calculated.
 *
 * The final injected record is composed as follows:
 *
 *   |common_hdr|ring_hdr|buf|
 *
 * where the common header is computed and filled in by this function, and
 * the additional upper-layer header ring_hdr may be absent.
 *
 * THIS BASIC RING OPERATION IS THE WORKHORSE USED BY THE PRODUCER API
 * IMPLEMENTED BY REAL RINGS, AND AS SUCH COULD BE INVOKED FROM ANY
 * CONTEXT... SO IT MUST NOT SLEEP.
 */
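/*
 * For example (illustrative sizes only): with hlen = 8 and blen = 100, the
 * record laid down at the current write position is
 *
 *   |entry header|8-byte ring_hdr|100-byte payload|
 *
 * and the returned chunk_sz is
 * sizeof(struct scsc_wifi_ring_buffer_entry) + 8 + 100.
 */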
static int wlog_write_record(struct scsc_wlog_ring *r, u8 *buf, size_t blen,
                             void *ring_hdr, size_t hlen, u32 verbose_level, u64 timestamp)
{
        u8 *start = NULL;
        u16 chunk_sz;
        unsigned long flags;

        if (scsc_wlog_ring_is_flushing(r))
                return 0;

        /* Just drop messages above the configured verbose level. 0 is disabled */
        if (!scsc_wlog_is_message_allowed(r, verbose_level))
                return 0;

        /* TODO Account for missing timestamp */
        /* Check the full size before assigning to the u16 chunk_sz, so an
         * oversized record cannot be truncated past the check.
         */
        if (sizeof(struct scsc_wifi_ring_buffer_entry) + hlen + blen > MAX_RECORD_SZ) {
                SCSC_TAG_WARNING(WLOG, "Dropping record of %zu bytes: exceeds MAX_RECORD_SZ\n",
                                 sizeof(struct scsc_wifi_ring_buffer_entry) + hlen + blen);
                return 0;
        }
        chunk_sz = sizeof(struct scsc_wifi_ring_buffer_entry) + hlen + blen;

        raw_spin_lock_irqsave(&r->wlock, flags);
        /**
         * Is there enough data to drain?
         * If so... drain, queueing work...
         * if not (min_data_size == 0) just do nothing
         */
        if (!r->drop_on_full && r->min_data_size &&
            AVAIL_BYTES(r) >= r->min_data_size)
                queue_work(r->drain_workq, &r->drain_work);
        /**
         * If no min_data_size was specified, NOR a periodic read-worker
         * was configured (i.e. max_interval_sec == 0), we could end up
         * filling up the ring... in that case just drop, accounting for it.
         *
         * This is the case when a packet_fate ring fills up...
         */
        if (!CAN_FIT(r, chunk_sz)) {
                SCSC_TAG_DBG4(WLOG, "[%s]:: dropped %zu bytes\n",
                              r->st.name, blen + hlen);
                r->dropped += blen + hlen;
                raw_spin_unlock_irqrestore(&r->wlock, flags);
                return 0;
        }

        start = REC_START(r, WPOS(r));
        REC_HEADER_FILL(start, hlen + blen, timestamp, (u8)r->st.flags, r->type);
        start += sizeof(struct scsc_wifi_ring_buffer_entry);
        if (hlen) {
                memcpy(start, ring_hdr, hlen);
                start += hlen;
        }
        if (blen)
                memcpy(start, buf, blen);
        /* Account for rollover using the spare area at the end of the ring... */
        if (start + blen > BUF_END(r))
                memcpy(BUF_START(r), BUF_END(r), start + blen - BUF_END(r));
        r->st.written_bytes += chunk_sz;
        r->st.written_records++;
        raw_spin_unlock_irqrestore(&r->wlock, flags);

        return chunk_sz;
}
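
/*
 * A minimal producer sketch (hypothetical caller: my_ring_hdr, MY_EVENT_ID
 * and the verbose level value are illustrative, not part of this driver):
 *
 *	struct my_ring_hdr hdr = { .event = MY_EVENT_ID };
 *	int written;
 *
 *	written = r->ops.write_record(r, payload, plen, &hdr, sizeof(hdr),
 *				      1, 0);
 *
 * A return of 0 means the record was filtered by verbose level, oversized,
 * or dropped on a full ring; the call never blocks, so it is usable from
 * atomic context.
 */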

static int wlog_default_ring_config_change(struct scsc_wlog_ring *r,
                                           u32 verbose_level, u32 flags,
                                           u32 max_interval_sec,
                                           u32 min_data_size)
{
        u32 old_interval_sec;

        SCSC_TAG_DEBUG(WLOG, "Ring: %s -- configuration change.\n",
                       r->st.name);

        r->min_data_size = min_data_size;
        old_interval_sec = r->max_interval_sec;
        r->max_interval_sec = max_interval_sec;

        if (r->state == RING_STATE_SUSPEND && r->st.verbose_level) {
                /* Restart the timer where required...
                 * it will take care of queueing the work back.
                 */
                if (r->max_interval_sec)
                        mod_timer(&r->drain_timer,
                                  jiffies + msecs_to_jiffies(r->max_interval_sec * 1000));
                r->state = RING_STATE_ACTIVE;
                SCSC_TAG_INFO(WLOG, "ACTIVATED ring: %s\n", r->st.name);
        } else if (r->state == RING_STATE_ACTIVE && !r->st.verbose_level) {
                /* Stop timer, cancel pending work */
                del_timer_sync(&r->drain_timer);
                cancel_work_sync(&r->drain_work);
                r->state = RING_STATE_SUSPEND;
                SCSC_TAG_INFO(WLOG, "SUSPENDED ring: %s\n", r->st.name);
        } else if (r->state == RING_STATE_ACTIVE) {
                if (old_interval_sec != r->max_interval_sec) {
                        if (!r->max_interval_sec)
                                del_timer_sync(&r->drain_timer);
                        else
                                mod_timer(&r->drain_timer,
                                          jiffies + msecs_to_jiffies(r->max_interval_sec * 1000));
                }
                SCSC_TAG_INFO(WLOG, "RECONFIGURED ring: %s\n", r->st.name);
        }

        return 0;
}
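
/*
 * Summary of the transitions driven above:
 *
 *   SUSPEND, verbose_level != 0 -> ACTIVE   (timer armed if max_interval_sec)
 *   ACTIVE,  verbose_level == 0 -> SUSPEND  (timer deleted, pending work cancelled)
 *   ACTIVE,  interval changed   -> timer re-armed, or deleted if now zero
 */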

static wifi_error wlog_start_logging(struct scsc_wlog_ring *r,
                                     u32 verbose_level, u32 flags,
                                     u32 max_interval_sec,
                                     u32 min_data_size)
{
        if (!r)
                return WIFI_ERROR_INVALID_ARGS;

        scsc_wlog_ring_change_verbosity(r, verbose_level);
        wlog_default_ring_config_change(r, verbose_level, flags,
                                        max_interval_sec, min_data_size);

        return WIFI_SUCCESS;
}

static struct scsc_wlog_ring_ops default_ring_ops = {
        .init = NULL,
        .finalize = NULL,
        .get_ring_status = wlog_get_ring_status,
        .read_records = wlog_read_records,
        .write_record = wlog_write_record,
        .loglevel_change = NULL,
        .drain_ring = wlog_default_ring_drainer,
        .start_logging = wlog_start_logging,
};

void scsc_wlog_ring_destroy(struct scsc_wlog_ring *r)
{
        if (!r || r->registered) {
                SCSC_TAG_ERR(WLOG, "Cannot destroy ring r:%p\n", r);
                return;
        }
        /* If initialized, call the custom finalizer first... reverse order */
        if (r->initialized && r->ops.finalize)
                r->ops.finalize(r);
        wlog_ring_finalize(r);
        kfree(r);
}

struct scsc_wlog_ring *scsc_wlog_ring_create(char *ring_name, u32 flags,
                                             u8 type, u32 size,
                                             unsigned int features_mask,
                                             init_cb init, finalize_cb fini,
                                             void *priv)
{
        struct scsc_wlog_ring *r = NULL;

        WARN_ON(!ring_name || !size);

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (!r)
                return r;
        r->type = type;
        r->st.flags = flags;
        r->st.rb_byte_size = size;
        if (snprintf(r->st.name, RING_NAME_SZ, "%s", ring_name) >= RING_NAME_SZ)
                SCSC_TAG_WARNING(WLOG, "Ring name too long... truncated to: %s\n",
                                 r->st.name);
        /* Set up defaults and configure init/finalize if any provided */
        memcpy(&r->ops, &default_ring_ops, sizeof(struct scsc_wlog_ring_ops));
        r->ops.init = init;
        r->ops.finalize = fini;
        r->priv = priv;
        /* Basic common initialization is called first */
        if (wlog_ring_init(r)) {
                SCSC_TAG_ERR(WLOG,
                             "Wi-Fi Logger Ring %s basic initialization failed.\n",
                             r->st.name);
                kfree(r);
                return NULL;
        }
        /* Note: unlike wlog_ring_init(), custom init callbacks are expected
         * to return non-zero on success.
         */
        if (r->ops.init) {
                if (r->ops.init(r)) {
                        SCSC_TAG_DBG4(WLOG,
                                      "Ring %s custom init completed\n",
                                      r->st.name);
                } else {
                        SCSC_TAG_ERR(WLOG,
                                     "Ring %s custom init FAILED !\n",
                                     r->st.name);
                        scsc_wlog_ring_destroy(r);
                        return NULL;
                }
        }
        r->features_mask = features_mask;
        raw_spin_lock_init(&r->rlock);
        raw_spin_lock_init(&r->wlock);
        r->initialized = true;
        SCSC_TAG_DEBUG(WLOG, "Ring '%s' initialized.\n", r->st.name);

        return r;
}
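
/*
 * Creation sketch (hypothetical values; the flags, type and features_mask
 * constants would come from the Wi-Fi HAL definitions, not from this file):
 *
 *	struct scsc_wlog_ring *r;
 *
 *	r = scsc_wlog_ring_create("my_ring", flags, type, 32768,
 *				  features_mask, NULL, NULL, NULL);
 *	if (!r)
 *		return -ENOMEM;
 */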

int scsc_wlog_register_loglevel_change_cb(struct scsc_wlog_ring *r,
                                          int (*callback)(struct scsc_wlog_ring *r, u32 new_loglevel))
{
        /* A NULL callback simply unregisters the current one */
        r->ops.loglevel_change = callback;

        return 0;
}

int scsc_wlog_drain_whole_ring(struct scsc_wlog_ring *r)
{
        SCSC_TAG_INFO(WLOG, "Draining whole ring %s\n", r->st.name);
        return r->ops.drain_ring(r, r->st.rb_byte_size);
}

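/*
 * Note: the flush itself happens asynchronously in wlog_drain_worker(),
 * which sees r->flushing set and passes the whole rb_byte_size to
 * drain_ring; the default drainer then resets the ring counters and
 * clears the flag under the write lock.
 */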
void scsc_wlog_flush_ring(struct scsc_wlog_ring *r)
{
        r->flushing = true;
        /* kick the workqueue... which will take care of the flushing */
        queue_work(r->drain_workq, &r->drain_work);
}