[RAMEN9610-20413][9610] wlbt: SCSC Driver version 10.6.1.0
GitHub/MotorolaMobilityLLC/kernel-slsi.git: drivers/misc/samsung/scsc/scsc_logring_ring.c
/****************************************************************************
 *
 * Copyright (c) 2016-2018 Samsung Electronics Co., Ltd. All rights reserved.
 *
 ****************************************************************************/

#include "scsc_logring_ring.h"

#ifdef CONFIG_SCSC_STATIC_RING_SIZE
static char a_ring[CONFIG_SCSC_STATIC_RING_SIZE + BASE_SPARE_SZ] __aligned(4);
#endif

static int scsc_decode_binary_len = DEFAULT_BIN_DECODE_LEN;
module_param(scsc_decode_binary_len, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_decode_binary_len,
		   "Number of bytes of a binary record to decode into ASCII human readable form when reading",
		   "run-time", DEFAULT_BIN_DECODE_LEN);
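
/*
 * Illustrative note (not from the original source): since the parameter is
 * declared with S_IWUSR it should be tunable at run time through sysfs.
 * The module name below is an assumption based on this file's name:
 *
 *	echo 16 > /sys/module/scsc_logring/parameters/scsc_decode_binary_len
 */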

/*
 * NOTE_CREATING_TAGS: when adding a tag string here REMEMBER to add
 * it also where required, taking care to maintain the same ordering.
 * (Search for NOTE_CREATING_TAGS)
 */
const char *tagstr[MAX_TAG + 1] = {
	"binary",
	"bin_wifi_ctrl_rx",
	"bin_wifi_data_rx",
	"bin_wifi_ctrl_tx",
	"bin_wifi_data_tx",
	"wlbt",		/* this is the generic one...NO_TAG */
	"wifi_rx",
	"wifi_tx",
	"bt_common",
	"bt_h4",
	"bt_fw",
	"bt_rx",
	"bt_tx",
	"cpktbuff",
	"fw_load",
	"fw_panic",
	"gdb_trans",
	"mif",
	"clk20",
	"clk20_test",
	"fm",
	"fm_test",
	"mx_file",
	"mx_fw",
	"mx_sampler",
	"mxlog_trans",
	"mxman",
	"mxman_test",
	"mxmgt_trans",
	"mx_mmap",
	"mx_proc",
	"panic_mon",
	"pcie_mif",
	"plat_mif",
	"kic_common",
	"wlbtd",
	"wlog",
	"lerna",
	"mx_cfg",
#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
	"init_deinit",
	"netdev",
	"cfg80211",
	"mlme",
	"summary_frames",
	"hydra",
	"tx",
	"rx",
	"udi",
	"wifi_fcq",
	"hip",
	"hip_init_deinit",
	"hip_fw_dl",
	"hip_sdio_op",
	"hip_ps",
	"hip_th",
	"hip_fh",
	"hip_sig",
	"func_trace",
	"test",
	"src_sink",
	"fw_test",
	"rx_ba",
	"tdls",
	"gscan",
	"mbulk",
	"flowc",
	"smapper",
#endif
	"test_me"
};

/**
 * Calculates and returns the CRC32 for the provided record and record pos.
 * Before calculating the CRC32, the crc field is temporarily substituted
 * with the 32 LSBs of the record's relative starting position.
 * Assumes the rec ptr area-validity has been checked upstream in the
 * caller chain.
 * We SKIP the fixed blob of the SYNC field that is placed ahead of the
 * CRC field.
 * Assumes the related ring buffer is currently atomically accessed by
 * the caller. MUST NOT SLEEP.
 */
static inline uint32_t get_calculated_crc(struct scsc_ring_record *rec,
					  loff_t pos)
{
	uint32_t calculated_crc = 0;
	uint32_t saved_crc = 0;

	saved_crc = rec->crc;
	rec->crc = (uint32_t)pos;
	/* we skip the fixed sync field when calculating the crc */
	calculated_crc =
		crc32_le(~0, (unsigned char const *)&rec->crc,
			 SCSC_CRC_RINGREC_SZ);
	rec->crc = saved_crc;
	return calculated_crc;
}

/**
 * Checks for record CRC sanity.
 * Assumes the related ring buffer is currently atomically accessed by
 * the caller. MUST NOT SLEEP.
 */
static inline bool is_record_crc_valid(struct scsc_ring_record *rec,
				       loff_t pos)
{
	uint32_t calculated_crc = 0;

	calculated_crc = get_calculated_crc(rec, pos);
	return calculated_crc == rec->crc;
}

/**
 * Calculates the proper CRC and sets it into the crc field.
 * Assumes the related ring buffer is currently atomically accessed by
 * the caller. MUST NOT SLEEP.
 */
static inline void finalize_record_crc(struct scsc_ring_record *rec,
				       loff_t pos)
{
	uint32_t calculated_crc = 0;

	if (!rec)
		return;
	rec->crc = (uint32_t)pos;
	calculated_crc =
		crc32_le(~0, (unsigned char const *)&rec->crc,
			 SCSC_CRC_RINGREC_SZ);
	rec->crc = calculated_crc;
}
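
/*
 * Illustrative sketch (not part of the original file): how the CRC helpers
 * above pair up on the write and read paths. A writer finalizes a record
 * in place at ring position 'pos', and a reader later validates it against
 * the same position; since the position itself is folded into the CRC, a
 * record copied to a different offset fails the check:
 *
 *	finalize_record_crc(rec, pos);              write side
 *	ok = is_record_crc_valid(rec, pos);         read side: true here
 *	ok = is_record_crc_valid(rec, pos + 8);     false: pos is CRC'ed too
 */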

/**
 * This function analyzes the pos provided, relative to the provided
 * ring, just to understand if it can be safely dereferenced.
 * Assumes RING is already locked.
 */
static inline bool is_ring_pos_safe(struct scsc_ring_buffer *rb,
				    loff_t pos)
{
	if (!rb || pos > rb->bsz || pos < 0)
		return false;
	/* NOT Wrapped */
	if (rb->head > rb->tail && pos > rb->head)
		return false;
	/* Wrapped... */
	if (rb->head < rb->tail &&
	    (pos > rb->head && pos < rb->tail))
		return false;
	return true;
}
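
/*
 * Illustrative diagram (added for clarity, not in the original source) of
 * the two ring geometries checked above; valid record positions are marked
 * with '#':
 *
 *	not wrapped (head > tail):   |....######.....|
 *	                                  tail  head
 *
 *	wrapped (head < tail):       |####......#####|
 *	                                 head   tail
 *
 * In the wrapped case the hole between head and tail is the only unsafe
 * region, so a pos strictly inside (head, tail) is rejected.
 */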

/**
 * This sanitizes a record header before using it.
 * It must be in the proper area related to head and tail, and
 * the CRC must fit the header.
 */
static inline bool is_ring_read_pos_valid(struct scsc_ring_buffer *rb,
					  loff_t pos)
{
	if (!is_ring_pos_safe(rb, pos))
		goto oos;
	/* We do not check for SYNC before CRC since most of the time
	 * you are NOT OutOfSync and so you MUST check CRC anyway.
	 * It will be useful only for resync.
	 * At last...check CRC...doing this check LAST avoids the risk of
	 * dereferencing an already dangling pos pointer.
	 */
	if (!is_record_crc_valid(SCSC_GET_REC(rb, pos), pos))
		goto oos;
	return true;
oos:
	if (rb)
		rb->oos++;
	return false;
}


/**
 * Builds a header into the provided buffer
 * and appends the optional trail string.
 */
static inline
int build_header(char *buf, int blen, struct scsc_ring_record *r,
		 const char *trail)
{
	int written = 0;
	struct timeval tval = {};

	tval = ns_to_timeval(r->nsec);
	written = scnprintf(buf, blen,
			    "<%d>[%6lu.%06ld] [c%d] [%c] [%s] :: %s",
			    r->lev, tval.tv_sec, tval.tv_usec,
			    r->core, (char)r->ctx, tagstr[r->tag],
			    (trail) ? : "");
	return written;
}
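
/*
 * Example header produced by build_header() (illustrative values, not from
 * the original source): for a level-7 record tagged "wlbt", logged from
 * core 1, the emitted prefix would look like:
 *
 *	<7>[    42.123456] [c1] [P] [wlbt] :: <trail or message follows>
 *
 * The context character is whatever r->ctx holds; 'P' here is just an
 * assumed example (mark_out_of_sync() below emits the same shape).
 */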


/**
 * We're going to overwrite something, writing from the head toward the tail,
 * so we must search for the next tail far enough from head in order not to be
 * overwritten: that will be our new tail after the wrap over.
 */
static inline
loff_t find_next_tail_far_enough_from_start(struct scsc_ring_buffer *rb,
					    loff_t start, int len)
{
	loff_t new_tail = rb->tail;

	while (start + len >= new_tail && new_tail != rb->last) {
		new_tail = SCSC_GET_NEXT_REC_ENTRY_POS(rb, new_tail);
		rb->records--;
	}
	if (start + len >= new_tail) {
		new_tail = 0;
		rb->records--;
	}
	return new_tail;
}

/**
 * This handles the plain append of a record to head, without
 * any need of wrapping or overwriting the current tail.
 * You can provide two buffers here: the second, hbuf, is optional
 * and will be written first. This is to account for the binary case
 * in which the record descriptor is written at first into the spare area
 * (like we do with var strings), BUT then the bulk of binary data is
 * written directly in place into the ring without double copies.
 */
static inline
void scsc_ring_buffer_plain_append(struct scsc_ring_buffer *rb,
				   const char *srcbuf, int slen,
				   const char *hbuf, int hlen)
{
	/* empty condition is a special case */
	if (rb->records)
		rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
	if (hbuf)
		memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
	else
		hlen = 0;
	memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
	finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
			    rb->head);
	rb->records++;
	if (rb->head > rb->last)
		rb->last = rb->head;
}


/**
 * This handles the case in which appending the current record must account
 * for overwriting: this situation can happen at the end of the ring if we do
 * NOT have enough space for the current record, or in any place when the
 * buffer has wrapped, head is before tail and there's not enough space to
 * write between current head and tail.
 */
static inline
void scsc_ring_buffer_overlap_append(struct scsc_ring_buffer *rb,
				     const char *srcbuf, int slen,
				     const char *hbuf, int hlen)
{
	if (rb->head < rb->tail &&
	    slen + hlen < rb->bsz - SCSC_GET_NEXT_SLOT_POS(rb, rb->head))
		rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
	else {
		rb->last = rb->head;
		rb->head = 0;
		rb->tail = 0;
		rb->wraps++;
	}
	rb->tail =
		find_next_tail_far_enough_from_start(rb, rb->head, slen + hlen);
	if (hbuf)
		memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
	else
		hlen = 0;
	memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
	finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
			    rb->head);
	rb->records++;
	if (rb->head > rb->last)
		rb->last = rb->head;
}


/**
 * This uses the spare area to prepare the record descriptor and to expand
 * the format string into the spare area, in order to get the final length of
 * the whole record+data. Data is prepended with a header representing the
 * data held in binary form in the record descriptor.
 * This data duplication helps when we'll read back a record holding string
 * data: we won't have to build the header on the fly during the read.
 */
static inline
int tag_writer_string(char *spare, int tag, int lev,
		      int prepend_header, const char *msg_head, va_list args)
{
	int written;
	char bheader[SCSC_HBUF_LEN] = {};
	struct scsc_ring_record *rrec;

	/* Fill record in place */
	rrec = (struct scsc_ring_record *)spare;
	SCSC_FILL_RING_RECORD(rrec, tag, lev);
	if (prepend_header)
		build_header(bheader, SCSC_HBUF_LEN, rrec, NULL);
	written = scnprintf(SCSC_GET_REC_BUF(spare),
			    BASE_SPARE_SZ - SCSC_RINGREC_SZ, "%s", bheader);
	/**
	 * NOTE THAT
	 * ---------
	 * vscnprintf's return value is the number of characters which have
	 * been written into @buf, NOT including the trailing '\0'.
	 * If @size is == 0 the function returns 0.
	 * Here we enforce a line length limit equal to
	 * BASE_SPARE_SZ - SCSC_RINGREC_SZ.
	 */
	written += vscnprintf(SCSC_GET_REC_BUF(spare) + written,
			      BASE_SPARE_SZ - SCSC_RINGREC_SZ - written,
			      msg_head, args);
	/* complete record metadata */
	rrec->len = written;
	return written;
}

/**
 * A ring API function to push a variable length format string into the
 * buffer. After the record has been created and pushed into the ring, any
 * process waiting on the related wait queue is awakened.
 */
int push_record_string(struct scsc_ring_buffer *rb, int tag, int lev,
		       int prepend_header, const char *msg_head, va_list args)
{
	int rec_len = 0;
	loff_t free_bytes;
	unsigned long flags;

	/* Prepare ring_record and header if needed */
	raw_spin_lock_irqsave(&rb->lock, flags);
	rec_len = tag_writer_string(rb->spare, tag, lev, prepend_header,
				    msg_head, args);
	/* Line too long: drop it */
	if (rec_len >= BASE_SPARE_SZ - SCSC_RINGREC_SZ) {
		raw_spin_unlock_irqrestore(&rb->lock, flags);
		return 0;
	}
	free_bytes = SCSC_RING_FREE_BYTES(rb);
	/**
	 * Evaluate if it's a trivial append or if we must account for
	 * any wrap over. Note that we do NOT truncate records across ring
	 * boundaries: if a record does NOT fit at the end of the buffer,
	 * we'll write it from the start directly.
	 */
	if (rec_len + SCSC_RINGREC_SZ < free_bytes)
		scsc_ring_buffer_plain_append(rb, rb->spare,
					      SCSC_RINGREC_SZ + rec_len,
					      NULL, 0);
	else
		scsc_ring_buffer_overlap_append(rb, rb->spare,
						SCSC_RINGREC_SZ + rec_len,
						NULL, 0);
	rb->written += rec_len;
	raw_spin_unlock_irqrestore(&rb->lock, flags);
	/* WAKE UP EVERYONE WAITING ON THIS BUFFER */
	wake_up_interruptible(&rb->wq);
	return rec_len;
}
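
/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): since
 * push_record_string() takes a va_list, a caller would normally hide it
 * behind a varargs helper along these lines; the real producer-side
 * wrappers are expected to live elsewhere in the logring code:
 *
 *	static int example_log(struct scsc_ring_buffer *rb,
 *			       int tag, int lev, const char *fmt, ...)
 *	{
 *		va_list args;
 *		int ret;
 *
 *		va_start(args, fmt);
 *		ret = push_record_string(rb, tag, lev, 1, fmt, args);
 *		va_end(args);
 *		return ret;
 *	}
 */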

/* This simply builds up a record descriptor for a binary entry. */
static inline
int tag_writer_binary(char *spare, int tag, int lev, size_t hexlen)
{
	struct scsc_ring_record *rrec;

	rrec = (struct scsc_ring_record *)spare;
	SCSC_FILL_RING_RECORD(rrec, tag, lev);
	rrec->len = hexlen;

	return hexlen;
}

/**
 * A ring API function to push binary data into the ring buffer. Binary data
 * is copied from the specified start/len location.
 * After the record has been created and pushed into the ring, any process
 * waiting on the related wait queue is awakened.
 */
int push_record_blob(struct scsc_ring_buffer *rb, int tag, int lev,
		     int prepend_header, const void *start, size_t len)
{
	loff_t free_bytes;
	unsigned long flags;

	if (len > SCSC_MAX_BIN_BLOB_SZ)
		len = SCSC_MAX_BIN_BLOB_SZ;
	/* Prepare ring_record and header if needed */
	raw_spin_lock_irqsave(&rb->lock, flags);
	memset(rb->spare, 0x00, rb->ssz);
	tag_writer_binary(rb->spare, tag, lev, len);
	free_bytes = SCSC_RING_FREE_BYTES(rb);
	if (len + SCSC_RINGREC_SZ < free_bytes)
		scsc_ring_buffer_plain_append(rb, start, len,
					      rb->spare, SCSC_RINGREC_SZ);
	else
		scsc_ring_buffer_overlap_append(rb, start, len,
						rb->spare, SCSC_RINGREC_SZ);
	rb->written += len;
	raw_spin_unlock_irqrestore(&rb->lock, flags);
	/* WAKE UP EVERYONE WAITING ON THIS BUFFER */
	wake_up_interruptible(&rb->wq);
	return len;
}
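
/*
 * Illustrative sketch (hypothetical call, not from this file): pushing a
 * received frame into the ring as a binary record. 'frame' and 'flen' are
 * assumed to exist at the call site, and the tag would be one of the
 * binary tags (the bin_* entries of tagstr[] above); only the descriptor
 * goes through the spare area, the payload is copied into the ring once:
 *
 *	pushed = push_record_blob(rb, tag, 7, 1, frame, flen);
 */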

/* A simple reader used to retrieve a string from the record.
 * It always returns ONE WHOLE RECORD if it fits the provided tbuf, OR NOTHING.
 */
static inline
size_t tag_reader_string(char *tbuf, struct scsc_ring_buffer *rb,
			 int start_rec, size_t tsz)
{
	size_t max_chunk = SCSC_GET_REC_LEN(SCSC_GET_PTR(rb, start_rec));

	if (max_chunk <= tsz)
		memcpy(tbuf, SCSC_GET_REC_BUF(rb->buf + start_rec), max_chunk);
	else
		max_chunk = 0;
	return max_chunk;
}

/*
 * Helper to dump binary data in ASCII readable form, up to
 * scsc_decode_binary_len bytes: when such modparam is set to -1
 * this will dump all the available data. Data is dumped onto the
 * output buffer with an endianness that conforms to the data as
 * dumped by the print_hex_dump() kernel standard facility.
 */
static inline
int binary_hexdump(char *tbuf, int tsz, struct scsc_ring_record *rrec,
		   int start, int dlen)
{
	int i, j, bytepos;
	unsigned char *blob = SCSC_GET_REC_BUF(rrec);
	char *hmap = "0123456789abcdef";

	/**
	 * Scan the buffer, reversing endianness where appropriate and
	 * producing ASCII human readable output, while obeying the chosen
	 * maximum decode length dlen.
	 */
	for (j = start, i = 0; j < tsz && i < rrec->len && i < dlen; i += 4) {
		bytepos = (rrec->len - i - 1 >= 3) ? 3 : rrec->len - i - 1;
		/* Reverse endianness to little only on a 4-byte boundary */
		if (bytepos == 3) {
			for (; bytepos >= 0; bytepos--) {
				if (i + bytepos >= dlen)
					continue;
				tbuf[j++] = hmap[blob[i + bytepos] >> 4 & 0x0f];
				tbuf[j++] = hmap[blob[i + bytepos] & 0x0f];
			}
		} else {
			int bb;

			/**
			 * Trailing bytes NOT aligned on a 4-byte boundary
			 * should be decoded maintaining the original
			 * endianness. This way we obtain a binary output
			 * perfectly equal to the one generated by the
			 * original UDI tools.
			 */
			for (bb = 0; bb <= bytepos; bb++) {
				if (i + bb >= dlen)
					break;
				tbuf[j++] = hmap[blob[i + bb] >> 4 & 0x0f];
				tbuf[j++] = hmap[blob[i + bb] & 0x0f];
			}
		}
	}
	return j;
}
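
/*
 * Worked example (added for clarity, not in the original source): for a
 * 5-byte record blob { 0x01, 0x02, 0x03, 0x04, 0x05 } with dlen >= 5, the
 * first aligned 4-byte group is emitted reversed to little-endian and the
 * trailing byte keeps its original order, so the ASCII output is:
 *
 *	"0403020105"
 */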

/**
 * A reader used to dump binary records: this function first of all
 * builds a proper human readable header to identify the record with the
 * usual debug level and timestamps, and then DUMPS some of the binary blob
 * in ASCII human readable form: how much is dumped depends on the module
 * param scsc_decode_binary_len (default 8 bytes).
 * ANYWAY ONLY ONE WHOLE RECORD IS DUMPED, OR NOTHING IF IT DOES NOT FIT
 * THE PROVIDED DESTINATION BUFFER TBUF.
 */
static inline
size_t tag_reader_binary(char *tbuf, struct scsc_ring_buffer *rb,
			 int start_rec, size_t tsz)
{
	size_t written;
	int declen = scsc_decode_binary_len;
	struct scsc_ring_record *rrec;
	char bheader[SCSC_HBUF_LEN] = {};
	char binfo[SCSC_BINFO_LEN] = {};
	size_t max_chunk;

	rrec = (struct scsc_ring_record *)SCSC_GET_PTR(rb, start_rec);
	if (declen < 0 || declen > rrec->len)
		declen = rrec->len;
	if (declen)
		snprintf(binfo, SCSC_BINFO_LEN, "HEX[%d/%d]: ",
			 declen, rrec->len);
	written = build_header(bheader, SCSC_HBUF_LEN, rrec,
			       declen ? binfo : "");
	/* Account for byte decoding (two ASCII chars for each byte)
	 * plus the trailing newline.
	 */
	max_chunk = written + (declen * 2) + 1;
	if (max_chunk <= tsz) {
		memcpy(tbuf, bheader, written);
		if (declen)
			written = binary_hexdump(tbuf, tsz,
						 rrec, written, declen);
		tbuf[written] = '\n';
		written++;
	} else {
		written = 0;
	}
	return written;
}
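
/*
 * Example output line (illustrative values, not from the original source):
 * a 64-byte binary record decoded with scsc_decode_binary_len == 8 would be
 * rendered roughly as:
 *
 *	<7>[    42.123456] [c1] [P] [binary] :: HEX[8/64]: 0403020108070605
 *
 * followed by the trailing newline appended above.
 */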

/**
 * This is a utility function to read from the specified ring_buffer
 * up to 'tsz' bytes of data, starting from record position 'start_rec'.
 * This function reads ONLY UP TO ONE RECORD and returns the effective
 * amount of data bytes read; it invokes the proper tag_reader_* helper
 * depending on the specific record it is handling.
 * Data is copied to a TEMP BUFFER provided by the user of this function,
 * IF AND ONLY IF a whole record CAN fit into the space available in the
 * destination buffer; otherwise the record is NOT copied and 0 is returned.
 * This function DOES NOT SLEEP.
 * Caller IS IN CHARGE to SOLVE any sync issue on the provided tbuf and
 * underlying ring buffer.
 *
 * @tbuf: a temp buffer destination for the read data
 * @rb: the ring_buffer to use.
 * @start_rec: the record from which to start, expressed as a record
 * starting position.
 * @tsz: the available space in tbuf
 * @return size_t: returns the bytes effectively read.
 */
static inline size_t
_read_one_whole_record(void *tbuf, struct scsc_ring_buffer *rb,
		       int start_rec, size_t tsz)
{
	if (SCSC_GET_REC_TAG(SCSC_GET_PTR(rb, start_rec)) > LAST_BIN_TAG)
		return tag_reader_string(tbuf, rb, start_rec, tsz);
	else
		return tag_reader_binary(tbuf, rb, start_rec, tsz);
}


/**
 * This just injects a string into the buffer to signal that we've gone
 * OUT OF SYNC, due to the ring WRAPPING too FAST, noting how many bytes
 * we resynced.
 */
static inline size_t mark_out_of_sync(char *tbuf, size_t tsz,
				      int resynced_bytes)
{
	size_t written = 0;
	struct timeval tval = {};

	tval = ns_to_timeval(local_clock());
	/* We should write something even if truncated ... */
	written = scnprintf(tbuf, tsz,
			    "<7>[%6lu.%06ld] [c%d] [P] [OOS] :: [[[ OUT OF SYNC -- RESYNC'ED BYTES %d ]]]\n",
			    tval.tv_sec, tval.tv_usec, smp_processor_id(),
			    resynced_bytes);
	return written;
}

/**
 * Attempt resync, searching for the SYNC pattern and verifying the CRC.
 * ASSUMES that the invalid_pos provided is anyway safe to access, since
 * it should be checked by the caller in advance.
 * The amount of resynced bytes is not necessarily the number of bytes
 * effectively lost....they could be much more...imagine the ring had
 * wrapped over multiple times before detecting the OUT OF SYNC.
 */
static inline loff_t reader_resync(struct scsc_ring_buffer *rb,
				   loff_t invalid_pos, int *resynced_bytes)
{
	int bytes = 0;
	loff_t sync_pos = rb->head;
	struct scsc_ring_record *candidate = SCSC_GET_REC(rb, invalid_pos);

	*resynced_bytes = 0;
	/* Walking through the ring in search of the sync, one byte at a time */
	while (invalid_pos != rb->head &&
	       !SCSC_IS_REC_SYNC_VALID(candidate)) {
		invalid_pos = (invalid_pos < rb->last) ?
			      (invalid_pos + sizeof(u8)) : 0;
		bytes += sizeof(u8);
		candidate = SCSC_GET_REC(rb, invalid_pos);
	}
	if (invalid_pos == rb->head ||
	    (SCSC_IS_REC_SYNC_VALID(candidate) &&
	     is_record_crc_valid(candidate, invalid_pos))) {
		sync_pos = invalid_pos;
		*resynced_bytes = bytes;
	}
	return sync_pos;
}

/**
 * An internal API ring function to retrieve into the provided tbuf
 * up to N WHOLE RECORDS starting from *last_read_rec.
 * It STOPS collecting records if:
 *  - NO MORE RECORDS TO READ: the last read record is head
 *  - NO MORE SPACE: on the provided destination tbuf to collect
 *    one more WHOLE record
 *  - MAX NUMBER OF REQUIRED RECORDS READ: if max_recs was passed in
 *    as ZERO it means read as much as you can till head is reached.
 *
 * If at start it detects an OUT OF SYNC, so that *last_read_rec is
 * NO MORE pointing to a valid record, it tries to RE-SYNC on the next
 * GOOD KNOWN record, or on HEAD as a last resort, and injects into
 * the user buffer an OUT OF SYNC marker record.
 *
 * ASSUMES proper locking and syncing ALREADY in place...does NOT SLEEP.
 */
size_t read_next_records(struct scsc_ring_buffer *rb, int max_recs,
			 loff_t *last_read_rec, void *tbuf, size_t tsz)
{
	size_t bytes_read = 0, last_read = -1;
	int resynced_bytes = 0, records = 0;
	loff_t next_rec = 0;

	/* Nothing to read...simply return 0, causing the reader to exit */
	if (*last_read_rec == rb->head)
		return bytes_read;
	if (!is_ring_read_pos_valid(rb, *last_read_rec)) {
		if (is_ring_pos_safe(rb, *last_read_rec)) {
			/* Try to resync from *last_read_rec INVALID POS */
			next_rec = reader_resync(rb, *last_read_rec,
						 &resynced_bytes);
		} else {
			/* Skip to head...the ONLY safe place known in this case. */
			resynced_bytes = 0;
			next_rec = rb->head;
		}
		bytes_read += mark_out_of_sync(tbuf, tsz, resynced_bytes);
	} else {
		/* next to read....we're surely NOT already at rb->head here */
		next_rec = (*last_read_rec != rb->last) ?
			   SCSC_GET_NEXT_SLOT_POS(rb, *last_read_rec) : 0;
	}
	do {
		/* Account for last read */
		last_read = bytes_read;
		bytes_read +=
			_read_one_whole_record(tbuf + bytes_read, rb,
					       next_rec, tsz - bytes_read);
		/* Did a WHOLE record fit into the available tbuf ? */
		if (bytes_read != last_read) {
			records++;
			*last_read_rec = next_rec;
			if (*last_read_rec != rb->head)
				next_rec = (next_rec != rb->last) ?
					   SCSC_GET_NEXT_SLOT_POS(rb, next_rec) : 0;
		}
	} while (*last_read_rec != rb->head &&
		 last_read != bytes_read &&
		 (!max_recs || records <= max_recs));

	return bytes_read;
}
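
/*
 * Illustrative consumer sketch (hypothetical, not part of this file): a
 * reader would keep its last-read cursor across calls and drain the ring
 * under the buffer lock, along these lines:
 *
 *	static loff_t last_pos;		// hypothetical per-reader cursor
 *
 *	raw_spin_lock_irqsave(&rb->lock, flags);
 *	n = read_next_records(rb, 0, &last_pos, tmp_buf, tmp_sz);
 *	raw_spin_unlock_irqrestore(&rb->lock, flags);
 *	// n bytes of whole records (or an OOS marker) are now in tmp_buf
 */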

/**
 * This function returns a static snapshot of the ring that can be used
 * for further processing using the usual records operations.
 *
 * It returns a freshly allocated scsc_ring_buffer descriptor whose
 * internal references are exactly the same as the original buffer
 * being snapshot, and with all the sync machinery re-initialized.
 * Even if the current use-case does NOT make any use of spinlocks and
 * waitqueues in the snapshot image, we provide an initialized instance
 * in order to be safe for future (mis-)usage.
 *
 * It also takes care to copy the content of the original ring buffer into
 * the new snapshot image (including the spare area) using the provided
 * pre-allocated snap_buf.
 *
 * Assumes the ring is already spinlocked.
 *
 * @rb: the original buffer to snapshot
 * @snap_buf: the pre-allocated ring-buffer area to use for copying records
 * @snap_sz: size of the pre-allocated area, including the spare
 * @snap_name: a human readable descriptor
 */
struct scsc_ring_buffer *scsc_ring_get_snapshot(const struct scsc_ring_buffer *rb,
						void *snap_buf, size_t snap_sz,
						char *snap_name)
{
	struct scsc_ring_buffer *snap_rb = NULL;

	if (!rb || !snap_buf || !snap_name || snap_sz != rb->bsz + rb->ssz)
		return snap_rb;

	/* Here we hold a lock, starving writers...try to be quick using
	 * GFP_ATOMIC, since scsc_ring_buffer is small enough (144 bytes)
	 */
	snap_rb = kzalloc(sizeof(*rb), GFP_ATOMIC);
	if (!snap_rb)
		return snap_rb;

	/* Copy the original buffer content into the provided snap_buf
	 * (memcpy() cannot fail here: both pointers were checked above)
	 */
	memcpy(snap_buf, rb->buf, snap_sz);
	snap_rb->bsz = rb->bsz;
	snap_rb->ssz = rb->ssz;
	snap_rb->head = rb->head;
	snap_rb->tail = rb->tail;
	snap_rb->last = rb->last;
	snap_rb->written = rb->written;
	snap_rb->records = rb->records;
	snap_rb->wraps = rb->wraps;
	/* this is related to reads, so it must be re-initialized */
	snap_rb->oos = 0;
	strncpy(snap_rb->name, snap_name, RNAME_SZ - 1);
	/* Link the copies */
	snap_rb->buf = snap_buf;
	snap_rb->spare = snap_rb->buf + snap_rb->bsz;
	/* cleanup spare */
	memset(snap_rb->spare, 0x00, snap_rb->ssz);
	/* Re-init the snapshot's copies of the sync tools */
	raw_spin_lock_init(&snap_rb->lock);
	init_waitqueue_head(&snap_rb->wq);

	return snap_rb;
}
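
/*
 * Illustrative sketch (hypothetical caller, not part of this file): taking
 * a snapshot while holding the ring lock, then reading from the copy
 * without further starving writers; the vmalloc() pre-allocation is an
 * assumed example:
 *
 *	snap_buf = vmalloc(rb->bsz + rb->ssz);
 *	raw_spin_lock_irqsave(&rb->lock, flags);
 *	snap = scsc_ring_get_snapshot(rb, snap_buf, rb->bsz + rb->ssz, "snap");
 *	raw_spin_unlock_irqrestore(&rb->lock, flags);
 *	if (snap)
 *		read_next_records(snap, 0, &pos, tmp_buf, tmp_sz);
 */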

/* Assumes the ring is already spinlocked. */
void scsc_ring_truncate(struct scsc_ring_buffer *rb)
{
	rb->head = 0;
	rb->tail = 0;
	rb->records = 0;
	rb->written = 0;
	rb->wraps = 0;
	rb->last = 0;
	memset(rb->buf + rb->head, 0x00, SCSC_RINGREC_SZ);
}

/**
 * alloc_ring_buffer - Allocates and initializes a basic ring buffer,
 * including a basic spare area where to handle string-splitting when
 * the buffer wraps. Basic spinlock/waitqueue init takes place here too.
 *
 * @bsz: the size of the ring buffer to allocate in bytes
 * @ssz: the size of the spare area to allocate in bytes
 * @name: a name for this ring buffer
 */
struct scsc_ring_buffer __init *alloc_ring_buffer(size_t bsz, size_t ssz,
						  const char *name)
{
	struct scsc_ring_buffer *rb = kmalloc(sizeof(*rb), GFP_KERNEL);

	if (!rb)
		return NULL;
	rb->bsz = bsz;
	rb->ssz = ssz;
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
	rb->buf = kzalloc(rb->bsz + rb->ssz, GFP_KERNEL);
	if (!rb->buf) {
		kfree(rb);
		return NULL;
	}
#else
	rb->buf = a_ring;
#endif
	rb->head = 0;
	rb->tail = 0;
	rb->last = 0;
	rb->written = 0;
	rb->records = 0;
	rb->wraps = 0;
	rb->oos = 0;
	rb->spare = rb->buf + rb->bsz;
	memset(rb->name, 0x00, RNAME_SZ);
	strncpy(rb->name, name, RNAME_SZ - 1);
	raw_spin_lock_init(&rb->lock);
	init_waitqueue_head(&rb->wq);

	return rb;
}
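
/*
 * Illustrative sketch (hypothetical init-time caller, not part of this
 * file): pairing the allocator above with its destructor below; the size
 * and name are assumed example values:
 *
 *	struct scsc_ring_buffer *rb;
 *
 *	rb = alloc_ring_buffer(131072, BASE_SPARE_SZ, "example_ring");
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	free_ring_buffer(rb);
 */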

/*
 * free_ring_buffer - Frees the ring buffer, what else...
 * ...does NOT account for spinlock existence currently
 *
 * @rb: a pointer to the ring buffer to free
 */
void free_ring_buffer(struct scsc_ring_buffer *rb)
{
	if (!rb)
		return;
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
	kfree(rb->buf);
#endif
	kfree(rb);
}