1 /******************************************************************************
3 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
5 ******************************************************************************/
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/mutex.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/time.h>
13 #include <linux/workqueue.h>
14 #include <linux/spinlock.h>
15 #include <scsc/scsc_logring.h>
18 #include "scsc_wifilogger_core.h"
20 static atomic_t next_ring_id
;
22 static void wlog_drain_worker(struct work_struct
*work
)
24 struct scsc_wlog_ring
*r
;
26 r
= container_of(work
, struct scsc_wlog_ring
, drain_work
);
28 if (r
&& r
->ops
.drain_ring
)
29 r
->ops
.drain_ring(r
, r
->flushing
? r
->st
.rb_byte_size
: DEFAULT_DRAIN_CHUNK_SZ(r
));
32 static void drain_timer_callback(unsigned long data
)
34 struct scsc_wlog_ring
*r
= (struct scsc_wlog_ring
*)data
;
36 SCSC_TAG_DBG4(WLOG
, "TIMER DRAIN : %p\n", r
);
37 /* we should kick the workqueue here...no sleep */
38 queue_work(r
->drain_workq
, &r
->drain_work
);
40 if (r
->st
.verbose_level
&& r
->max_interval_sec
) {
41 mod_timer(&r
->drain_timer
,
42 jiffies
+ msecs_to_jiffies(r
->max_interval_sec
* 1000));
43 SCSC_TAG_DBG4(WLOG
, "TIMER RELOADED !!!\n");
47 static int wlog_ring_init(struct scsc_wlog_ring
*r
)
49 /* Allocate buffer and spare area */
50 r
->buf
= kzalloc(r
->st
.rb_byte_size
+ MAX_RECORD_SZ
, GFP_KERNEL
);
53 r
->drain_sz
= DRAIN_BUF_SZ
;
54 r
->drain_buf
= kzalloc(r
->drain_sz
, GFP_KERNEL
);
59 mutex_init(&r
->drain_lock
);
61 r
->drain_workq
= create_workqueue("wifilogger");
62 INIT_WORK(&r
->drain_work
, wlog_drain_worker
);
63 setup_timer(&r
->drain_timer
, drain_timer_callback
, (unsigned long)r
);
65 r
->st
.ring_id
= atomic_read(&next_ring_id
);
66 atomic_inc(&next_ring_id
);
68 SCSC_TAG_DBG3(WLOG
, "Workers initialized for ring[%p]: %s\n",
74 static void wlog_ring_finalize(struct scsc_wlog_ring
*r
)
79 cancel_work_sync(&r
->drain_work
);
80 del_timer_sync(&r
->drain_timer
);
81 destroy_workqueue(r
->drain_workq
);
83 r
->initialized
= false;
89 static wifi_error
wlog_get_ring_status(struct scsc_wlog_ring
*r
,
90 struct scsc_wifi_ring_buffer_status
*status
)
93 return WIFI_ERROR_INVALID_ARGS
;
100 static int wlog_read_records(struct scsc_wlog_ring
*r
, u8
*buf
,
101 size_t blen
, u32
*records
,
102 struct scsc_wifi_ring_buffer_status
*status
)
104 u16 read_bytes
= 0, rec_sz
= 0;
105 u32 got_records
= 0, req_records
= -1;
107 if (scsc_wlog_ring_is_flushing(r
))
111 * req_records has been loaded with a max u32 value by default
112 * on purpose...if a max number of records is provided in records
113 * update req_records accordingly
116 req_records
= *records
;
118 * We have ONLY ONE READER at any time that consumes data, impersonated
119 * here by the drain_ring drainer callback, whose read-ops are ensured
120 * atomic by the drain_lock mutex: this will guard against races
121 * between the periodic-drain worker and the threshold-drain procedure
122 * triggered by the write itself.
124 * But we want also to guard against any direct read_record invokation
125 * like in test rings via debugfs so we add a read spinlock: this last
126 * won't lead to any contention here anyway most of the time in a
127 * real scenario so the same reason we don't need either any irqsave
128 * spinlock version....so latency also is not impacted.
130 raw_spin_lock(&r
->rlock
);
131 while (!scsc_wlog_is_ring_empty(r
) && got_records
< req_records
) {
132 rec_sz
= REC_SZ(r
, RPOS(r
));
133 if (read_bytes
+ rec_sz
> blen
)
136 * Rollover is transparent on read...last written material in
137 * spare is still there...
139 memcpy(buf
+ read_bytes
, REC_START(r
, RPOS(r
)), rec_sz
);
140 read_bytes
+= rec_sz
;
141 r
->st
.read_bytes
+= rec_sz
;
146 raw_spin_unlock(&r
->rlock
);
149 *records
= got_records
;
150 SCSC_TAG_DBG4(WLOG
, "BytesRead:%d -- RecordsRead:%d\n",
151 read_bytes
, got_records
);
156 static int wlog_default_ring_drainer(struct scsc_wlog_ring
*r
, size_t drain_sz
)
158 int rval
= 0, drained_bytes
= 0;
159 size_t chunk_sz
= drain_sz
<= r
->drain_sz
? drain_sz
: r
->drain_sz
;
160 struct scsc_wifi_ring_buffer_status ring_status
= {};
162 /* An SRCU on callback here would better */
163 mutex_lock(&r
->drain_lock
);
165 /* drain ... consumes data */
166 rval
= r
->ops
.read_records(r
, r
->drain_buf
, chunk_sz
, NULL
, &ring_status
);
167 /* and push...if any callback defined */
169 mutex_lock(&r
->wl
->lock
);
170 if (rval
> 0 && r
->wl
->on_ring_buffer_data_cb
) {
172 "Invoking registered log_handler:%p to drain %d bytes\n",
173 r
->wl
->on_ring_buffer_data_cb
, rval
);
174 r
->wl
->on_ring_buffer_data_cb(r
->st
.name
, r
->drain_buf
, rval
,
175 &ring_status
, r
->wl
->on_ring_buffer_ctx
);
176 SCSC_TAG_DBG4(WLOG
, "Callback processed %d bytes\n", rval
);
178 mutex_unlock(&r
->wl
->lock
);
180 drained_bytes
+= rval
;
181 } while (rval
&& drained_bytes
<= drain_sz
);
182 SCSC_TAG_DBG3(WLOG
, "%s %d bytes\n", (r
->flushing
) ? "Flushed" : "Drained",
185 /* Execute flush if required... */
189 /* Inhibit writers momentarily */
190 raw_spin_lock_irqsave(&r
->wlock
, flags
);
192 r
->st
.written_records
= 0;
193 r
->st
.read_bytes
= r
->st
.written_bytes
= 0;
195 raw_spin_unlock_irqrestore(&r
->wlock
, flags
);
196 SCSC_TAG_INFO(WLOG
, "Ring '%s' flushed.\n", r
->st
.name
);
198 mutex_unlock(&r
->drain_lock
);
200 return drained_bytes
;
204 * A generic write that takes care to build the final payload created
206 * - the common record-header
207 * - an optionally provided ring_hdr
208 * - the provided payload buf
210 * The optional header is passed down as a separate parameters to avoid
211 * unnecessary intermediate copies: this function will copy all the bits
212 * in place directly into the proper calculated ring position.
214 * By design a read-end-point is always provided by the framework
215 * (in terms of netlink channels towards the WiFi-HAL) so we spawn a
216 * configurable reader-worker upon start of logging, and the same reader
217 * is also invoked when ring is running out of space: for these reasons
218 * the ring is meant NOT to overwrite itself ever.
220 * If NO periodic reader is spawned NOR a min_data_size threshold was
221 * specified to force kick the periodic drainer, we could just end-up
222 * filling up the ring: in that case we just drop and account for it.
224 * Data is drained and pushed periodically upstream using the
225 * on_ring_buffer_data_cb if any provided and periodic drain was
228 * @r: the referenced ring
231 * @ring_hdr: upper-layer-record-header
232 * @hlen: upper-layer-record-header length
233 * @verbose_level: loglevel for this message (to be checked against)
 234  * @timestamp: a provided timestamp (if any). If zero a timestamp will be
237 * Final injected record will be composed as follows:
239 * |common_hdr|ring_hdr|buf|
 241  * where the common header is computed and filled in by this function, and the
 242  * additional upper-layer header ring_hdr is optional and may be omitted.
244 * THIS BASIC RING OPERATION IS THE WORKHORSE USED BY THE PRODUCER API IMPLEMENTED
245 * BY REAL RINGS, AND AS SUCH COULD BE INVOKED FROM ANY CONTEXTS...SO IT MUST NOT SLEEP.
247 static int wlog_write_record(struct scsc_wlog_ring
*r
, u8
*buf
, size_t blen
,
248 void *ring_hdr
, size_t hlen
, u32 verbose_level
, u64 timestamp
)
254 if (scsc_wlog_ring_is_flushing(r
))
257 /* Just drop messages above configured verbose level. 0 is disabled */
258 if (!scsc_wlog_is_message_allowed(r
, verbose_level
))
261 //TODO Account for missing timestamp
262 chunk_sz
= sizeof(struct scsc_wifi_ring_buffer_entry
) + hlen
+ blen
;
263 if (chunk_sz
> MAX_RECORD_SZ
) {
264 SCSC_TAG_WARNING(WLOG
, "Dropping record exceeding %d bytes\n",
269 raw_spin_lock_irqsave(&r
->wlock
, flags
);
271 * Are there enough data to drain ?
272 * if so...drain...queueing work....
273 * if not (min_data_size == 0) just do nothing
275 if (!r
->drop_on_full
&& r
->min_data_size
&&
276 AVAIL_BYTES(r
) >= r
->min_data_size
)
277 queue_work(r
->drain_workq
, &r
->drain_work
);
279 * If no min_data_size was specified, NOR a periodic read-worker
280 * was configured (i.e. max_interval_sec == 0), we could end up
281 * filling up the ring...in that case just drop...accounting for it.
283 * This is the case when packet_fate rings fills up...
285 if (!CAN_FIT(r
, chunk_sz
)) {
286 SCSC_TAG_DBG4(WLOG
, "[%s]:: dropped %zd bytes\n",
287 r
->st
.name
, blen
+ hlen
);
288 r
->dropped
+= blen
+ hlen
;
289 raw_spin_unlock_irqrestore(&r
->wlock
, flags
);
293 start
= REC_START(r
, WPOS(r
));
294 REC_HEADER_FILL(start
, hlen
+ blen
, timestamp
, (u8
)r
->st
.flags
, r
->type
);
295 start
+= sizeof(struct scsc_wifi_ring_buffer_entry
);
297 memcpy(start
, ring_hdr
, hlen
);
301 memcpy(start
, buf
, blen
);
302 /* Account for rollover using spare area at end of ring... */
303 if (start
+ blen
> BUF_END(r
))
304 memcpy(BUF_START(r
), BUF_END(r
), start
+ blen
- BUF_END(r
));
305 r
->st
.written_bytes
+= chunk_sz
;
306 r
->st
.written_records
++;
307 raw_spin_unlock_irqrestore(&r
->wlock
, flags
);
312 static int wlog_default_ring_config_change(struct scsc_wlog_ring
*r
,
313 u32 verbose_level
, u32 flags
,
314 u32 max_interval_sec
,
317 u32 old_interval_sec
;
319 SCSC_TAG_DEBUG(WLOG
, "Ring: %s -- configuration change.\n",
322 r
->min_data_size
= min_data_size
;
323 old_interval_sec
= r
->max_interval_sec
;
324 r
->max_interval_sec
= max_interval_sec
;
326 if (r
->state
== RING_STATE_SUSPEND
&& r
->st
.verbose_level
) {
327 /* Restarting timeri where required ...
328 * it will take care to queue_work back.
330 if (r
->max_interval_sec
)
331 mod_timer(&r
->drain_timer
,
332 jiffies
+ msecs_to_jiffies(r
->max_interval_sec
* 1000));
333 r
->state
= RING_STATE_ACTIVE
;
334 SCSC_TAG_INFO(WLOG
, "ACTIVATED ring: %s\n", r
->st
.name
);
335 } else if (r
->state
== RING_STATE_ACTIVE
&& !r
->st
.verbose_level
) {
336 /* Stop timer, cancel pending work */
337 del_timer_sync(&r
->drain_timer
);
338 cancel_work_sync(&r
->drain_work
);
339 r
->state
= RING_STATE_SUSPEND
;
340 SCSC_TAG_INFO(WLOG
, "SUSPENDED ring: %s\n", r
->st
.name
);
341 } else if (r
->state
== RING_STATE_ACTIVE
) {
342 if (old_interval_sec
!= r
->max_interval_sec
) {
343 if (!r
->max_interval_sec
)
344 del_timer_sync(&r
->drain_timer
);
346 mod_timer(&r
->drain_timer
,
347 jiffies
+ msecs_to_jiffies(r
->max_interval_sec
* 1000));
349 SCSC_TAG_INFO(WLOG
, "RECONFIGURED ring: %s\n", r
->st
.name
);
355 static wifi_error
wlog_start_logging(struct scsc_wlog_ring
*r
,
356 u32 verbose_level
, u32 flags
,
357 u32 max_interval_sec
,
361 return WIFI_ERROR_INVALID_ARGS
;
363 scsc_wlog_ring_change_verbosity(r
, verbose_level
);
364 wlog_default_ring_config_change(r
, verbose_level
, flags
,
365 max_interval_sec
, min_data_size
);
370 static struct scsc_wlog_ring_ops default_ring_ops
= {
373 .get_ring_status
= wlog_get_ring_status
,
374 .read_records
= wlog_read_records
,
375 .write_record
= wlog_write_record
,
376 .loglevel_change
= NULL
,
377 .drain_ring
= wlog_default_ring_drainer
,
378 .start_logging
= wlog_start_logging
,
381 void scsc_wlog_ring_destroy(struct scsc_wlog_ring
*r
)
383 if (!r
|| r
->registered
) {
384 SCSC_TAG_ERR(WLOG
, "Cannot destroy ring r:%p\n", r
);
387 /* If initialized call custom finalizer at first..reverse order */
388 if (r
->initialized
&& r
->ops
.finalize
)
390 wlog_ring_finalize(r
);
394 struct scsc_wlog_ring
*scsc_wlog_ring_create(char *ring_name
, u32 flags
,
396 unsigned int features_mask
,
397 init_cb init
, finalize_cb fini
,
400 struct scsc_wlog_ring
*r
= NULL
;
402 WARN_ON(!ring_name
|| !size
);
404 r
= kzalloc(sizeof(*r
), GFP_KERNEL
);
409 r
->st
.rb_byte_size
= size
;
410 if (snprintf(r
->st
.name
, RING_NAME_SZ
- 1, "%s", ring_name
) >= RING_NAME_SZ
)
411 SCSC_TAG_WARNING(WLOG
, "Ring name too long...truncated to: %s\n",
413 /* Setup defaults and configure init finalize if any provided */
414 memcpy(&r
->ops
, &default_ring_ops
, sizeof(struct scsc_wlog_ring_ops
));
416 r
->ops
.finalize
= fini
;
418 /* Basic common initialization is called first */
419 if (wlog_ring_init(r
)) {
421 "Wi-Fi Logger Ring %s basic initialization failed.\n",
427 if (r
->ops
.init(r
)) {
429 "Ring %s custom init completed\n",
433 "Ring %s custom init FAILED !\n",
435 scsc_wlog_ring_destroy(r
);
439 r
->features_mask
= features_mask
;
440 raw_spin_lock_init(&r
->rlock
);
441 raw_spin_lock_init(&r
->wlock
);
442 r
->initialized
= true;
443 SCSC_TAG_DEBUG(WLOG
, "Ring '%s' initialized.\n", r
->st
.name
);
448 int scsc_wlog_register_loglevel_change_cb(struct scsc_wlog_ring
*r
,
449 int (*callback
)(struct scsc_wlog_ring
*r
, u32 new_loglevel
))
452 r
->ops
.loglevel_change
= NULL
;
454 r
->ops
.loglevel_change
= callback
;
459 int scsc_wlog_drain_whole_ring(struct scsc_wlog_ring
*r
)
461 SCSC_TAG_INFO(WLOG
, "Draining whole ring %s\n", r
->st
.name
);
462 return r
->ops
.drain_ring(r
, r
->st
.rb_byte_size
);
465 void scsc_wlog_flush_ring(struct scsc_wlog_ring
*r
)
468 /* kick the workq...which will take care of flushing */
469 queue_work(r
->drain_workq
, &r
->drain_work
);