/****************************************************************************
 * Copyright (c) 2016-2018 Samsung Electronics Co., Ltd. All rights reserved.
 ****************************************************************************/

#include "scsc_logring_ring.h"
#ifdef CONFIG_SCSC_STATIC_RING_SIZE
static char a_ring[CONFIG_SCSC_STATIC_RING_SIZE + BASE_SPARE_SZ] __aligned(4);
#endif

static int scsc_decode_binary_len = DEFAULT_BIN_DECODE_LEN;
module_param(scsc_decode_binary_len, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_decode_binary_len,
		   "Number of bytes decoded to ASCII human-readable form when reading a binary record dump",
		   "run-time", DEFAULT_BIN_DECODE_LEN);
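/*
 * Example (illustrative, not part of the driver): because the parameter is
 * declared with S_IRUGO | S_IWUSR it can be inspected and changed at run time
 * from user space, e.g.:
 *
 *   cat /sys/module/scsc_logring/parameters/scsc_decode_binary_len
 *   echo 16 > /sys/module/scsc_logring/parameters/scsc_decode_binary_len
 *
 * The "scsc_logring" module name in the sysfs path is an assumption; it
 * depends on how this file is linked into the final kernel module.
 */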
/*
 * NOTE_CREATING_TAGS: when adding a tag string here REMEMBER to add
 * it also where required, taking care to maintain the same ordering.
 * (Search 4 NOTE_CREATING_TAGS)
 */
const char *tagstr[MAX_TAG + 1] = {
	/* ... */
	"wlbt",		/* this is the generic one...NO_TAG */
	/* ... */
#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
	/* ... */
#endif
};
/*
 * Calculate and return the CRC32 for the provided record and record pos.
 * Before calculating the CRC32 the crc field is temporarily substituted
 * with the 32 LSB of the record's relative starting position.
 * Assumes the rec ptr area-validity has been checked upstream in the
 * caller chain.
 * We SKIP the fixed blob of the SYNC field that is placed ahead of
 * the CRC field itself.
 * Assumes the related ring buffer is currently atomically accessed by
 * the caller. MUST NOT SLEEP.
 */
static inline uint32_t get_calculated_crc(struct scsc_ring_record *rec,
					   loff_t pos)
{
	uint32_t calculated_crc = 0;
	uint32_t saved_crc = 0;

	saved_crc = rec->crc;
	rec->crc = (uint32_t)pos;
	/* we skip the fixed sync field when calculating the crc */
	calculated_crc =
		crc32_le(~0, (unsigned char const *)&rec->crc,
			 SCSC_CRC_RINGREC_SZ);
	rec->crc = saved_crc;
	return calculated_crc;
}
/*
 * Checks the record's CRC for sanity.
 * Assumes the related ring buffer is currently atomically accessed by
 * the caller. MUST NOT SLEEP.
 */
static inline bool is_record_crc_valid(struct scsc_ring_record *rec,
				       loff_t pos)
{
	uint32_t calculated_crc = 0;

	calculated_crc = get_calculated_crc(rec, pos);
	return calculated_crc == rec->crc;
}
/*
 * Calculate the proper CRC and set it into the crc field.
 * Assumes the related ring buffer is currently atomically accessed by
 * the caller. MUST NOT SLEEP.
 */
static inline void finalize_record_crc(struct scsc_ring_record *rec,
				       loff_t pos)
{
	uint32_t calculated_crc = 0;

	rec->crc = (uint32_t)pos;
	calculated_crc =
		crc32_le(~0, (unsigned char const *)&rec->crc,
			 SCSC_CRC_RINGREC_SZ);
	rec->crc = calculated_crc;
}
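/*
 * Because both finalize_record_crc() (write side) and is_record_crc_valid()
 * (read side) temporarily substitute the record's own start offset into the
 * crc field before running crc32_le(), a record header found at the wrong
 * offset (for example a stale copy left behind after a wrap) fails validation
 * even if its stored CRC is internally consistent.
 */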
/*
 * This function analyzes the pos provided relative to the provided
 * ring, just to understand if it can be safely dereferenced.
 * Assumes RING is already locked.
 */
static inline bool is_ring_pos_safe(struct scsc_ring_buffer *rb,
				    loff_t pos)
{
	if (!rb || pos > rb->bsz || pos < 0)
		return false;
	/* NOT wrapped: head is ahead of tail */
	if (rb->head > rb->tail && pos > rb->head)
		return false;
	/* Wrapped: the area between head and tail is free space */
	if (rb->head < rb->tail &&
	    (pos > rb->head && pos < rb->tail))
		return false;
	return true;
}
/*
 * This sanitizes the record header before using it.
 * It must be in the proper area related to head and tail and
 * the CRC must match the header.
 */
static inline bool is_ring_read_pos_valid(struct scsc_ring_buffer *rb,
					  loff_t pos)
{
	if (!is_ring_pos_safe(rb, pos))
		return false;
	/* We do not check for SYNC before CRC since most of the time
	 * you are NOT OutOfSync and so you MUST check CRC anyway.
	 * It will be useful only for resync.
	 * At last...check CRC...doing this check LAST avoids the risk of
	 * dereferencing an already dangling pos pointer.
	 */
	if (!is_record_crc_valid(SCSC_GET_REC(rb, pos), pos))
		return false;
	return true;
}
/*
 * Build a header into the provided buffer
 * and append the optional trail string.
 */
static inline
int build_header(char *buf, int blen, struct scsc_ring_record *r,
		 const char *trail)
{
	int written = 0;
	struct timeval tval = {};

	tval = ns_to_timeval(r->nsec);
	written = scnprintf(buf, blen,
			    "<%d>[%6lu.%06ld] [c%d] [%c] [%s] :: %s",
			    r->lev, tval.tv_sec, tval.tv_usec,
			    r->core, (char)r->ctx, tagstr[r->tag],
			    trail ? trail : "");
	return written;
}
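/*
 * For illustration only: with the format string above, a record pushed at
 * level 7 from core 1 with the generic tag would be rendered roughly as
 *
 *   <7>[    42.123456] [c1] [P] [wlbt] :: example message
 *
 * The exact level digit, context letter and tag text depend on the values
 * stored in the record descriptor by the writer paths below.
 */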
/*
 * We're going to overwrite something writing from the head toward the tail,
 * so we must search for the next tail far enough from the head in order not
 * to be overwritten: that will be our new tail after the wrap over.
 */
static inline loff_t
find_next_tail_far_enough_from_start(struct scsc_ring_buffer *rb,
				     loff_t start, int len)
{
	loff_t new_tail = rb->tail;

	while (start + len >= new_tail && new_tail != rb->last) {
		new_tail = SCSC_GET_NEXT_REC_ENTRY_POS(rb, new_tail);
	}
	if (start + len >= new_tail) {
		/* Even the last record would be overwritten: wrap the tail */
		new_tail = 0;
	}
	return new_tail;
}
/*
 * This handles the plain append of a record to the head without
 * any need of wrapping or overwriting the current tail.
 * You can provide two buffers here: the second, hbuf, is optional
 * and will be written first. This is to account for the binary case
 * in which the record descriptor is written at first into the spare area
 * (like we do with var strings), BUT then the bulk of binary data is
 * written directly in place into the ring without double copies.
 */
static inline
void scsc_ring_buffer_plain_append(struct scsc_ring_buffer *rb,
				   const char *srcbuf, int slen,
				   const char *hbuf, int hlen)
{
	/* empty condition is special case */
	if (rb->records)
		rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
	if (hbuf)
		memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
	else
		hlen = 0;
	memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
	finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
			    rb->head);
	rb->records++;
	if (rb->head > rb->last)
		rb->last = rb->head;
}
/*
 * This handles the case in which appending the current record must account
 * for overwriting: this situation can happen at the end of the ring if we do
 * NOT have enough space for the current record, or anywhere when the buffer
 * has wrapped, head is before tail and there's not enough space to write
 * between the current head and tail.
 */
static inline
void scsc_ring_buffer_overlap_append(struct scsc_ring_buffer *rb,
				     const char *srcbuf, int slen,
				     const char *hbuf, int hlen)
{
	if (rb->head < rb->tail &&
	    slen + hlen < rb->bsz - SCSC_GET_NEXT_SLOT_POS(rb, rb->head))
		rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
	else {
		/* Not enough space up to the end of the ring: wrap over */
		rb->wraps++;
		rb->head = 0;
	}
	rb->tail =
		find_next_tail_far_enough_from_start(rb, rb->head, slen + hlen);
	if (hbuf)
		memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
	else
		hlen = 0;
	memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
	finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
			    rb->head);
	rb->records++;
	if (rb->head > rb->last)
		rb->last = rb->head;
}
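/*
 * Illustrative layout (not from the original sources) of the wrapped case
 * handled above, where head < tail:
 *
 *   0           head                 tail                     last
 *   |--newer----|.......free.........|------older records------|....|
 *
 * A record that cannot fit between head and tail (or past the end of the
 * ring) forces find_next_tail_far_enough_from_start() to advance tail past
 * every record that is about to be overwritten.
 */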
/*
 * This uses the spare area to prepare the record descriptor and to expand
 * the format string into the spare area in order to get the final length of
 * the whole record+data. Data is prepended with a header representing the
 * data held in binary form in the record descriptor.
 * This data duplication helps when we'll read back a record holding string
 * data: we won't have to build the header on the fly during the read.
 */
static inline
int tag_writer_string(char *spare, int tag, int lev,
		      int prepend_header, const char *msg_head, va_list args)
{
	int written = 0;
	char bheader[SCSC_HBUF_LEN] = {};
	struct scsc_ring_record *rrec;

	/* Fill record in place */
	rrec = (struct scsc_ring_record *)spare;
	SCSC_FILL_RING_RECORD(rrec, tag, lev);
	if (prepend_header) {
		build_header(bheader, SCSC_HBUF_LEN, rrec, NULL);
		written = scnprintf(SCSC_GET_REC_BUF(spare),
				    BASE_SPARE_SZ - SCSC_RINGREC_SZ, "%s", bheader);
	}
	/*
	 * vscnprintf retvalue is the number of characters which have been
	 * written into the @buf NOT including the trailing '\0'.
	 * If @size is == 0 the function returns 0.
	 * Here we enforce a line length limit equal to
	 * BASE_SPARE_SZ - SCSC_RINGREC_SZ.
	 */
	written += vscnprintf(SCSC_GET_REC_BUF(spare) + written,
			      BASE_SPARE_SZ - SCSC_RINGREC_SZ - written,
			      msg_head, args);
	/* complete record metadata */
	rrec->len = written;
	return written;
}
/*
 * A ring API function to push a variable length format string into the buffer.
 * After the record has been created and pushed into the ring, any process
 * waiting on the related wait queue is awakened.
 */
int push_record_string(struct scsc_ring_buffer *rb, int tag, int lev,
		       int prepend_header, const char *msg_head, va_list args)
{
	int rec_len = 0;
	loff_t free_bytes;
	unsigned long flags;

	/* Prepare ring_record and header if needed */
	raw_spin_lock_irqsave(&rb->lock, flags);
	rec_len = tag_writer_string(rb->spare, tag, lev, prepend_header,
				    msg_head, args);
	/* Line too long anyway...drop */
	if (rec_len >= BASE_SPARE_SZ - SCSC_RINGREC_SZ) {
		raw_spin_unlock_irqrestore(&rb->lock, flags);
		return 0;
	}
	free_bytes = SCSC_RING_FREE_BYTES(rb);
	/*
	 * Evaluate if it's a trivial append or if we must account for
	 * any wrap and overwrite. Note that we do NOT truncate records across
	 * ring boundaries; if a record does NOT fit at the end of the buffer,
	 * we'll write it from the start directly.
	 */
	if (rec_len + SCSC_RINGREC_SZ < free_bytes)
		scsc_ring_buffer_plain_append(rb, rb->spare,
					      SCSC_RINGREC_SZ + rec_len,
					      NULL, 0);
	else
		scsc_ring_buffer_overlap_append(rb, rb->spare,
						SCSC_RINGREC_SZ + rec_len,
						NULL, 0);
	rb->written += rec_len;
	raw_spin_unlock_irqrestore(&rb->lock, flags);
	/* WAKEUP EVERYONE WAITING ON THIS BUFFER */
	wake_up_interruptible(&rb->wq);
	return rec_len;
}
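/*
 * Usage sketch (illustrative only, not part of the original sources): a
 * printf-like wrapper around push_record_string() would collect its varargs
 * and hand the va_list over, e.g.:
 *
 *   int example_log(struct scsc_ring_buffer *rb, int lev,
 *                   const char *fmt, ...)
 *   {
 *           va_list args;
 *           int ret;
 *
 *           va_start(args, fmt);
 *           ret = push_record_string(rb, 0, lev, 1, fmt, args);
 *           va_end(args);
 *           return ret;
 *   }
 *
 * The tag value 0 and prepend_header = 1 are placeholder choices; real
 * callers pass the tag definitions from the logring headers.
 */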
/* This simply builds up a record descriptor for a binary entry. */
static inline
int tag_writer_binary(char *spare, int tag, int lev, size_t hexlen)
{
	struct scsc_ring_record *rrec;

	rrec = (struct scsc_ring_record *)spare;
	SCSC_FILL_RING_RECORD(rrec, tag, lev);
	rrec->len = hexlen;

	return hexlen;
}
/*
 * A ring API function to push binary data into the ring buffer. Binary data
 * is copied from the specified start/len location.
 * After the record has been created and pushed into the ring, any process
 * waiting on the related wait queue is awakened.
 */
int push_record_blob(struct scsc_ring_buffer *rb, int tag, int lev,
		     int prepend_header, const void *start, size_t len)
{
	loff_t free_bytes;
	unsigned long flags;

	if (len > SCSC_MAX_BIN_BLOB_SZ)
		len = SCSC_MAX_BIN_BLOB_SZ;
	/* Prepare ring_record and header if needed */
	raw_spin_lock_irqsave(&rb->lock, flags);
	memset(rb->spare, 0x00, rb->ssz);
	tag_writer_binary(rb->spare, tag, lev, len);
	free_bytes = SCSC_RING_FREE_BYTES(rb);
	if (len + SCSC_RINGREC_SZ < free_bytes)
		scsc_ring_buffer_plain_append(rb, start, len,
					      rb->spare, SCSC_RINGREC_SZ);
	else
		scsc_ring_buffer_overlap_append(rb, start, len,
						rb->spare, SCSC_RINGREC_SZ);
	rb->written += len;
	raw_spin_unlock_irqrestore(&rb->lock, flags);
	/* WAKEUP EVERYONE WAITING ON THIS BUFFER */
	wake_up_interruptible(&rb->wq);
	return len;
}
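/*
 * Usage sketch (illustrative only, not part of the original sources):
 * pushing a small binary blob with header prepending enabled:
 *
 *   u8 sample[4] = { 0x12, 0x34, 0x56, 0x78 };
 *
 *   push_record_blob(rb, tag, 7, 1, sample, sizeof(sample));
 *
 * "rb" and "tag" are assumed to come from the surrounding logring core;
 * blobs longer than SCSC_MAX_BIN_BLOB_SZ are silently truncated to that
 * size by the clamp at the top of push_record_blob().
 */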
/* A simple reader used to retrieve a string record from the ring.
 * It always returns ONE WHOLE RECORD if it fits the provided tbuf, OR NOTHING.
 */
static inline
size_t tag_reader_string(char *tbuf, struct scsc_ring_buffer *rb,
			 int start_rec, size_t tsz)
{
	size_t max_chunk = SCSC_GET_REC_LEN(SCSC_GET_PTR(rb, start_rec));

	if (max_chunk <= tsz)
		memcpy(tbuf, SCSC_GET_REC_BUF(rb->buf + start_rec), max_chunk);
	else
		max_chunk = 0;
	return max_chunk;
}
/*
 * Helper to dump binary data in ASCII readable form up to
 * scsc_decode_binary_len bytes: when such modparam is set to -1
 * this will dump all the available data. Data is dumped onto the
 * output buffer with an endianness that conforms to the data as
 * dumped by the print_hex_dump() kernel standard facility.
 */
static inline
int binary_hexdump(char *tbuf, int tsz, struct scsc_ring_record *rrec,
		   int start, int dlen)
{
	int i, j, bytepos, bb;
	unsigned char *blob = SCSC_GET_REC_BUF(rrec);
	char *hmap = "0123456789abcdef";

	/*
	 * Scan the buffer reversing endianness when appropriate and
	 * producing ASCII human readable output while obeying the chosen
	 * maximum decode length dlen.
	 */
	for (j = start, i = 0; j < tsz && i < rrec->len && i < dlen; i += 4) {
		bytepos = (rrec->len - i - 1 >= 3) ? 3 : rrec->len - i - 1;
		/* Reverse endianness to little only on a 4-byte boundary */
		if (bytepos == 3) {
			for (; bytepos >= 0; bytepos--) {
				if (i + bytepos >= dlen)
					continue;
				tbuf[j++] = hmap[blob[i + bytepos] >> 4 & 0x0f];
				tbuf[j++] = hmap[blob[i + bytepos] & 0x0f];
			}
		} else {
			/*
			 * Trailing bytes NOT aligned on a 4-byte boundary
			 * should be decoded maintaining the original endianness.
			 * This way we obtain a binary output perfectly equal
			 * to the one generated by the original UDI tools.
			 */
			for (bb = 0; bb <= bytepos; bb++) {
				if (i + bb >= dlen)
					break;
				tbuf[j++] = hmap[blob[i + bb] >> 4 & 0x0f];
				tbuf[j++] = hmap[blob[i + bb] & 0x0f];
			}
		}
	}
	return j;
}
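/*
 * Worked example (illustrative): with blob = { 0x12, 0x34, 0x56, 0x78 } and
 * dlen >= 4, the 4-byte group is emitted in reversed (little-endian) order,
 * so the characters written to tbuf are "78563412". A trailing group shorter
 * than 4 bytes keeps its original byte order instead.
 */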
/*
 * A reader used to dump binary records: this function first of all
 * builds a proper human readable header to identify the record with the
 * usual debug level and timestamps, and then DUMPS some of the binary blob
 * in ASCII human readable form: how much is dumped depends on the module
 * param scsc_decode_binary_len (default 8 bytes).
 * ANYWAY, ONLY ONE WHOLE RECORD IS DUMPED, OR NOTHING IF IT DOES NOT FIT
 * THE PROVIDED DESTINATION BUFFER TBUF.
 */
static inline
size_t tag_reader_binary(char *tbuf, struct scsc_ring_buffer *rb,
			 int start_rec, size_t tsz)
{
	size_t max_chunk = 0, written = 0;
	int declen = scsc_decode_binary_len;
	struct scsc_ring_record *rrec;
	char bheader[SCSC_HBUF_LEN] = {};
	char binfo[SCSC_BINFO_LEN] = {};

	rrec = (struct scsc_ring_record *)SCSC_GET_PTR(rb, start_rec);
	if (declen < 0 || declen > rrec->len)
		declen = rrec->len;
	if (declen)
		snprintf(binfo, SCSC_BINFO_LEN, "HEX[%d/%d]: ",
			 declen, rrec->len);
	written = build_header(bheader, SCSC_HBUF_LEN, rrec,
			       declen ? binfo : "");
	/* Account for byte decoding: two ASCII chars for each byte */
	max_chunk = written + (declen * 2);
	if (max_chunk <= tsz) {
		memcpy(tbuf, bheader, written);
		if (declen)
			written = binary_hexdump(tbuf, tsz - written,
						 rrec, written, declen);
		tbuf[written] = '\n';
		written++;
	} else {
		written = 0;
	}
	return written;
}
/*
 * This is a utility function to read from the specified ring_buffer
 * up to 'tsz' bytes of data, starting from record 'start_rec'.
 * This function reads ONLY UP TO ONE RECORD and returns the effective
 * amount of data bytes read; it invokes the proper tag_reader_* helper
 * depending on the specific record it is handling.
 * Data is copied to a TEMP BUFFER provided by the user of this function,
 * IF AND ONLY IF a whole record CAN fit into the space available in the
 * destination buffer; otherwise the record is NOT copied and 0 is returned.
 * This function DOES NOT SLEEP.
 * The caller IS IN CHARGE of solving any sync issue on the provided tbuf and
 * the underlying ring buffer.
 *
 * @tbuf: a temp buffer destination for the read data
 * @rb: the ring_buffer to use
 * @start_rec: the record from which to start, expressed as a record
 *             start offset inside the ring
 * @tsz: the available space in tbuf
 * @return size_t: returns the bytes effectively read
 */
static inline size_t
_read_one_whole_record(void *tbuf, struct scsc_ring_buffer *rb,
		       int start_rec, size_t tsz)
{
	if (SCSC_GET_REC_TAG(SCSC_GET_PTR(rb, start_rec)) > LAST_BIN_TAG)
		return tag_reader_string(tbuf, rb, start_rec, tsz);

	return tag_reader_binary(tbuf, rb, start_rec, tsz);
}
/*
 * This just injects a string into the buffer to signal that we've gone
 * OUT OF SYNC due to the ring WRAPPING too FAST, noting how many bytes
 * we had to skip in order to resync.
 */
static inline size_t mark_out_of_sync(char *tbuf, size_t tsz,
				      int resynced_bytes)
{
	size_t written = 0;
	struct timeval tval = {};

	tval = ns_to_timeval(local_clock());
	/* We should write something even if truncated ... */
	written = scnprintf(tbuf, tsz,
			    "<7>[%6lu.%06ld] [c%d] [P] [OOS] :: [[[ OUT OF SYNC -- RESYNC'ED BYTES %d ]]]\n",
			    tval.tv_sec, tval.tv_usec, smp_processor_id(),
			    resynced_bytes);
	return written;
}
/*
 * Attempt a resync, searching for the SYNC pattern and verifying the CRC.
 * ASSUMES that the invalid_pos provided is anyway safe to access, since
 * it should have been checked by the caller in advance.
 * The amount of resynced bytes is not necessarily the number of bytes
 * effectively lost...they could be many more...imagine the ring had
 * wrapped over multiple times before the OUT OF SYNC was detected.
 */
static inline loff_t
reader_resync(struct scsc_ring_buffer *rb,
	      loff_t invalid_pos, int *resynced_bytes)
{
	int bytes = 0;
	loff_t sync_pos = rb->head;
	struct scsc_ring_record *candidate = SCSC_GET_REC(rb, invalid_pos);

	*resynced_bytes = 0;
	/* Walk through the ring in search of the sync, one byte at a time */
	while (invalid_pos != rb->head &&
	       !SCSC_IS_REC_SYNC_VALID(candidate)) {
		invalid_pos = (invalid_pos < rb->last) ?
			      (invalid_pos + sizeof(u8)) : 0;
		bytes += sizeof(u8);
		candidate = SCSC_GET_REC(rb, invalid_pos);
	}
	if (invalid_pos == rb->head ||
	    (SCSC_IS_REC_SYNC_VALID(candidate) &&
	     is_record_crc_valid(candidate, invalid_pos))) {
		sync_pos = invalid_pos;
		*resynced_bytes = bytes;
	}
	return sync_pos;
}
/*
 * An internal API ring function to retrieve into the provided tbuf
 * up to N WHOLE RECORDS starting from *last_read_rec.
 * It STOPS collecting records if:
 *  - NO MORE RECORDS TO READ: last_read_rec record is head
 *  - NO MORE SPACE: on the provided destination tbuf to collect
 *    one more WHOLE record
 *  - MAX NUMBER OF REQUIRED RECORDS READ: if max_recs was passed in
 *    as ZERO it means read as much as you can till head is reached.
 *
 * If at start it detects an OUT OF SYNC, so that next_rec is
 * NO MORE pointing to a valid record, it tries to RE-SYNC on the next
 * KNOWN GOOD record, or on HEAD as a last resort, and injects into
 * the user buffer an OUT OF SYNC marker record.
 *
 * ASSUMES proper locking and syncing is ALREADY in place...does NOT SLEEP.
 */
size_t read_next_records(struct scsc_ring_buffer *rb, int max_recs,
			 loff_t *last_read_rec, void *tbuf, size_t tsz)
{
	size_t bytes_read = 0, last_read = -1;
	int resynced_bytes = 0, records = 0;
	loff_t next_rec = 0;

	/* Nothing to read...simply return 0 causing reader to exit */
	if (*last_read_rec == rb->head)
		return bytes_read;
	if (!is_ring_read_pos_valid(rb, *last_read_rec)) {
		if (is_ring_pos_safe(rb, *last_read_rec)) {
			/* Try to resync from *last_read_rec INVALID POS */
			next_rec = reader_resync(rb, *last_read_rec,
						 &resynced_bytes);
		} else {
			/* Skip to head...ONLY safe place known in this case. */
			next_rec = rb->head;
		}
		bytes_read += mark_out_of_sync(tbuf, tsz, resynced_bytes);
	} else {
		/* next to read....we're surely NOT already at rb->head here */
		next_rec = (*last_read_rec != rb->last) ?
			   SCSC_GET_NEXT_SLOT_POS(rb, *last_read_rec) : 0;
	}
	do {
		/* Account for last read */
		last_read = bytes_read;
		bytes_read +=
			_read_one_whole_record(tbuf + bytes_read, rb,
					       next_rec, tsz - bytes_read);
		/* Did a WHOLE record fit into the available tbuf ? */
		if (bytes_read != last_read) {
			records++;
			*last_read_rec = next_rec;
			if (*last_read_rec != rb->head)
				next_rec = (next_rec != rb->last) ?
					   SCSC_GET_NEXT_SLOT_POS(rb, next_rec) : 0;
		}
	} while (*last_read_rec != rb->head &&
		 last_read != bytes_read &&
		 (!max_recs || records <= max_recs));

	return bytes_read;
}
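/*
 * Reader-side usage sketch (illustrative only, not from the original
 * sources): a sequential reader keeps a loff_t cursor across calls and
 * drains whole records into a local buffer under the ring lock:
 *
 *   char page[4096];
 *   loff_t cursor;        // maintained by the caller across calls
 *   size_t got;
 *   unsigned long flags;
 *
 *   raw_spin_lock_irqsave(&rb->lock, flags);
 *   got = read_next_records(rb, 0, &cursor, page, sizeof(page));
 *   raw_spin_unlock_irqrestore(&rb->lock, flags);
 *
 * max_recs == 0 means "as many whole records as fit"; the cursor's initial
 * value and wrap handling are the caller's responsibility, and the driver's
 * real readers live in the other logring source files.
 */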
/*
 * This function returns a static snapshot of the ring that can be used
 * for further processing using the usual record operations.
 *
 * It returns a freshly allocated scsc_ring_buffer descriptor whose
 * internal references are exactly the same as the original buffer
 * being snapshot, and with all the sync machinery re-initialized.
 * Even if the current use-case does NOT make any use of spinlocks and
 * waitqueues in the snapshot image, we provide an initialized instance
 * in order to be safe against future (mis-)usage.
 *
 * It also takes care to copy the content of the original ring buffer into
 * the new snapshot image (including the spare area) using the provided
 * pre-allocated snap_buf.
 *
 * Assumes the ring is already spinlocked.
 *
 * @rb: the original buffer to snapshot
 * @snap_buf: the pre-allocated ring-buffer area to use for copying records
 * @snap_sz: the size of the pre-allocated area, including the spare
 * @snap_name: a human readable descriptor
 */
struct scsc_ring_buffer *scsc_ring_get_snapshot(const struct scsc_ring_buffer *rb,
						void *snap_buf, size_t snap_sz,
						const char *snap_name)
{
	struct scsc_ring_buffer *snap_rb = NULL;

	if (!rb || !snap_buf || !snap_name || snap_sz != rb->bsz + rb->ssz)
		return NULL;

	/* Here we hold a lock starving writers...try to be quick using
	 * GFP_ATOMIC since scsc_ring_buffer is small enough (144 bytes)
	 */
	snap_rb = kzalloc(sizeof(*rb), GFP_ATOMIC);
	if (!snap_rb)
		return NULL;

	/* Copy the original buffer content into the provided snap_buf */
	if (memcpy(snap_buf, rb->buf, snap_sz)) {
		snap_rb->bsz = rb->bsz;
		snap_rb->ssz = rb->ssz;
		snap_rb->head = rb->head;
		snap_rb->tail = rb->tail;
		snap_rb->last = rb->last;
		snap_rb->written = rb->written;
		snap_rb->records = rb->records;
		snap_rb->wraps = rb->wraps;
		/* this is related to reads so must be re-init */
		/* ... */
		strncpy(snap_rb->name, snap_name, RNAME_SZ - 1);
		/* Link the copies */
		snap_rb->buf = snap_buf;
		snap_rb->spare = snap_rb->buf + snap_rb->bsz;
		memset(snap_rb->spare, 0x00, snap_rb->ssz);
		/* Re-init the snapshot's copies of the sync tools */
		raw_spin_lock_init(&snap_rb->lock);
		init_waitqueue_head(&snap_rb->wq);
	}

	return snap_rb;
}
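/*
 * Usage sketch (illustrative only, not from the original sources): taking a
 * snapshot requires a scratch area of exactly rb->bsz + rb->ssz bytes and
 * must happen under the ring lock, per the comment above:
 *
 *   struct scsc_ring_buffer *snap = NULL;
 *   void *area = vmalloc(rb->bsz + rb->ssz);   // allocation scheme assumed
 *   unsigned long flags;
 *
 *   if (area) {
 *           raw_spin_lock_irqsave(&rb->lock, flags);
 *           snap = scsc_ring_get_snapshot(rb, area, rb->bsz + rb->ssz,
 *                                         "snapshot");
 *           raw_spin_unlock_irqrestore(&rb->lock, flags);
 *   }
 *
 * The snapshot can then be walked with read_next_records() without holding
 * the live ring's lock; it is released with kfree(snap) plus vfree(area).
 */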
/* Assumes the ring is already spinlocked. */
void scsc_ring_truncate(struct scsc_ring_buffer *rb)
{
	rb->head = 0;
	rb->tail = 0;
	rb->last = 0;
	rb->records = 0;
	rb->written = 0;
	rb->wraps = 0;
	memset(rb->buf + rb->head, 0x00, SCSC_RINGREC_SZ);
}
/*
 * alloc_ring_buffer - Allocates and initializes a basic ring buffer,
 * including a basic spare area where to handle string splitting when the
 * buffer wraps. Basic spinlock/waitqueue init takes place here too.
 *
 * @bsz: the size of the ring buffer to allocate in bytes
 * @ssz: the size of the spare area to allocate in bytes
 * @name: a name for this ring buffer
 */
struct scsc_ring_buffer __init *alloc_ring_buffer(size_t bsz, size_t ssz,
						  const char *name)
{
	struct scsc_ring_buffer *rb = kmalloc(sizeof(*rb), GFP_KERNEL);

	if (!rb)
		return NULL;
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
	rb->bsz = bsz;
	rb->ssz = ssz;
	rb->buf = kzalloc(rb->bsz + rb->ssz, GFP_KERNEL);
	if (!rb->buf) {
		kfree(rb);
		return NULL;
	}
#else
	rb->bsz = CONFIG_SCSC_STATIC_RING_SIZE;
	rb->ssz = BASE_SPARE_SZ;
	rb->buf = a_ring;
#endif
	rb->head = 0;
	rb->tail = 0;
	rb->last = 0;
	rb->written = 0;
	rb->records = 0;
	rb->wraps = 0;
	rb->spare = rb->buf + rb->bsz;
	memset(rb->name, 0x00, RNAME_SZ);
	strncpy(rb->name, name, RNAME_SZ - 1);
	raw_spin_lock_init(&rb->lock);
	init_waitqueue_head(&rb->wq);

	return rb;
}
/*
 * free_ring_buffer - Free the ring...what else.
 * ...does NOT account for spinlock existence currently.
 *
 * @rb: a pointer to the ring buffer to free
 */
void free_ring_buffer(struct scsc_ring_buffer *rb)
{
	if (!rb)
		return;
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
	kfree(rb->buf);
#endif
	kfree(rb);
}
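/*
 * Lifecycle sketch (illustrative only, not from the original sources):
 *
 *   struct scsc_ring_buffer *rb;
 *
 *   rb = alloc_ring_buffer(256 * 1024, BASE_SPARE_SZ, "samlog");
 *   if (rb) {
 *           ...push/read records...
 *           free_ring_buffer(rb);
 *   }
 *
 * The 256 KiB size and the "samlog" name are placeholders; when
 * CONFIG_SCSC_STATIC_RING_SIZE is set the storage comes from the static
 * a_ring[] array instead of kzalloc(), as handled in alloc_ring_buffer()
 * above.
 */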