[RAMEN9610-20413][9610] wlbt: SCSC Driver version 10.6.1.0
[GitHub/MotorolaMobilityLLC/kernel-slsi.git] / drivers / net / wireless / scsc / mbulk.h
1 /******************************************************************************
2 *
3 * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
4 *
5 *****************************************************************************/
6
7 #ifndef __MBULK_H__
8 #define __MBULK_H__
9
10 /**
11 * mbulk(bulk memory) API
12 *
13 * This header file describes APIs of the bulk memory management.
14 * The diagram below is an example of a mbulk buffer with one
15 * segment (i.e. not a chained mbulk).
16 *
17 * sig_bufsz
18 * |<-------->|
19 * | |<--------- dat_bufsz ---------->|
20 * +--------------------------------------------------+
21 * | mbulk| signal | bulk buffer |
22 * +-------------------------+---------------+--------+
23 * | | | valid data | |
24 * | | |<--------+---->| |
25 * | | | mbulk_tlen(m) | |
26 * | |<----->| | |<------>|
27 * | mbulk_headroom(m)| | mbulk_tailroom(m)
28 * | | |
29 * | |-- off -->|
30 * v v |
31 * mbulk_get_sig(m) mbulk_dat(m) |
32 * v
33 * mbulk_dat_at(m,off)
34 *
35 * In general, all clients are supposed to use only mbulk_xxx() APIs (but not
36 * mbulk_seg_xxx() APIs), as they can handle S/G chained mbulk as well.
 * But as of now, especially in Condor, S/G chained mbulk is not supported,
 * which means most of the mbulk_xxx() functions are wrappers of mbulk_seg_xxx().
39 *
40 * An in-lined signal buffer can be allocated along with a mbulk buffer.
41 * There is no direct life-cycle relationship between the signal and the
42 * associated mbulk in this case, which means that the signal buffer should be
43 * de-allocated independently of the mbulk buffer.
44 *
45 */
46
47 /**
48 * bulk buffer descriptor
49 */
50 struct mbulk;
51
52 /**
53 * mbulk host pool ID
54 */
55 #define MBULK_POOL_ID_DATA (0)
56 #define MBULK_POOL_ID_CTRL (1)
57 #define MBULK_POOL_ID_MAX (2)
58
59 /**
60 * mbulk buffer classification
61 *
62 * Note that PACKED attribute is added to enum definition so that
63 * compiler assigns the smallest integral type (u8).
64 */
65 enum mbulk_class {
66 MBULK_CLASS_CONTROL = 0,
67 MBULK_CLASS_HOSTIO = 1,
68 MBULK_CLASS_DEBUG = 2,
69
70 MBULK_CLASS_FROM_HOST_DAT = 3,
71 MBULK_CLASS_FROM_HOST_CTL = 4,
72 MBULK_CLASS_FROM_RADIO = 5,
73 MBULK_CLASS_DPLP = 6,
74 MBULK_CLASS_OTHERS = 7,
75 MBULK_CLASS_FROM_RADIO_FORWARDED = 8,
76 MBULK_CLASS_MAX
77 } __packed;
78
79 /**
80 * The private definition of mbulk structure is included here
81 * so that its members can be directly accessed, and the access
82 * codes can be in-lined by the compiler.
83 * But client codes are not supposed to directly refer to mbulk
84 * members, nor use mbulk_seg_xxx() functions. Only modules handling
85 * mbulk scatter/gather chain would directly use mulk_seg_xxx() APIs.
86 */
87 #include "mbulk_def.h"
88
89 /**
90 * Get the bulk data reference counter
91 *
92 * After a bulk buffer with non-zero data buffer size is created,
93 * the reference counter is set to one. Each time it is duplicated,
94 * its reference counter would be increased.
95 *
96 * Note that the reference counter is initialized to zero if a signal
97 * is created from mbulk pool but with zero data buffer size, as there
98 * is no data buffer.
99 */
static inline int mbulk_refcnt(const struct mbulk *m)
{
	/* Reference count as tracked by the underlying segment. */
	int refs = MBULK_SEG_REFCNT(m);

	return refs;
}
104
105 /**
106 * Get the bulk data buffer size
107 *
108 */
static inline int mbulk_buffer_size(const struct mbulk *m)
{
	/* Size of the data buffer owned by this segment. */
	int bufsz = MBULK_SEG_DAT_BUFSIZE(m);

	return bufsz;
}
113
114 /**
115 * Check if mbulk has an in-lined signal buffer
116 *
117 */
118 static inline bool mbulk_has_signal(const struct mbulk *m)
119 {
120 return MBULK_SEG_HAS_SIGNAL(m);
121 }
122
123 /**
124 * Set mbulk to be read-only
125 */
static inline void mbulk_set_readonly(struct mbulk *m)
{
	/* Mark the segment read-only; writers are expected to check via
	 * mbulk_is_readonly() (mbulk_dat_rw() warns on violation).
	 */
	MBULK_SEG_SET_READONLY(m);
}
130
131 /**
132 * is mbulk read-only
133 */
134 static inline bool mbulk_is_readonly(const struct mbulk *m)
135 {
136 return MBULK_SEG_IS_READONLY(m);
137 }
138
139 /**
140 * check if mbulk is a scatter/gather chained buffer
141 *
142 */
143 static inline bool mbulk_is_sg(const struct mbulk *m)
144 {
145 return MBULK_SEG_IS_CHAIN_HEAD(m);
146 }
147
148 /**
149 * check if mbulk is a part of scatter/gather chained buffer
150 *
151 */
152 static inline bool mbulk_is_chained(const struct mbulk *m)
153 {
154 return MBULK_SEG_IS_CHAINED(m);
155 }
156
157 /**
158 * Allocate a bulk buffer with an in-lined signal buffer
159 *
160 * Only one mbulk segment is used for allocation starting from the
 * mbulk pool with the smallest segment size. If no segment fits
 * the requested size, NULL is returned without trying to create
163 * a chained buffer.
164 *
165 */
166 struct mbulk *mbulk_with_signal_alloc(enum mbulk_class clas, size_t sig_bufsz,
167 size_t dat_bufsz);
168 /**
169 * Allocate a bulk buffer with an in-lined signal buffer
170 *
 * A mbulk segment is allocated from the given pool, if its size
 * meets the requested size.
173 *
174 */
175 struct mbulk *mbulk_with_signal_alloc_by_pool(u8 pool_id, u16 colour,
176 enum mbulk_class clas, size_t sig_bufsz, size_t dat_bufsz);
177
178 /**
179 * Get the number of free mbulk slots in a pool
180 *
181 * Returns the number of mbulk slots available in a given pool.
182 */
183 int mbulk_pool_get_free_count(u8 pool_id);
184
185 /**
186 * Get a signal buffer address
187 *
188 * Given a mbulk buffer, returns a signal buffer address.
189 *
190 * @param m mbulk
191 * @return in-lined signal buffer
192 */
static inline void *mbulk_get_seg(const struct mbulk *m)
{
	/* The signal buffer starts right after the mbulk descriptor. */
	void *sig = (void *)MBULK_SEG_B(m);

	return sig;
}
197
198 /**
199 * Get a signal buffer address
200 *
201 * Given a mbulk buffer, returns a signal buffer address if any in-lined
202 * signal buffer.
203 *
204 */
205 static inline void *mbulk_get_signal(const struct mbulk *m)
206 {
207 bool ret = false;
208
209 ret = mbulk_has_signal(m);
210
211 return ret ? mbulk_get_seg(m) : NULL;
212 }
213
214 /**
215 * Allocate a bulk buffer
216 *
217 * Only one mbulk segment is used for allocation starting from the
 * mbulk pool with the smallest segment size. If no segment fits
 * the requested size, NULL is returned without trying to create
220 * a chained buffer.
221 *
222 */
223 static inline struct mbulk *mbulk_alloc(enum mbulk_class clas, size_t dat_bufsz)
224 {
225 return mbulk_with_signal_alloc(clas, 0, dat_bufsz);
226 }
227
228 /**
229 * free mbulk buffer
230 *
 * After checking the bulk reference counter, this function returns the buffer
232 * to the mbulk pool if it is zero. Note that this doesn't free the in-lined
233 * signal buffer.
234 */
static inline void mbulk_free(struct mbulk *m)
{
	/* Return the segment to its pool (subject to the refcount, per the
	 * comment above). Any in-lined signal buffer is NOT freed here; it
	 * has an independent life-cycle (see file header).
	 */
	mbulk_seg_free(m);
}
239
240 /**
241 * get bulk buffer address for read or write access
242 *
243 * The address is the buffer address after the headroom in the mbulk segment.
244 * Note that this function can only be used to access the data in the same
245 * segment, including a segment in the mbulk chain (for example, to access
246 * the 802.11 header of A-MSDU).
247 *
248 */
static inline void *mbulk_dat_rw(const struct mbulk *m)
{
	void *dat = MBULK_SEG_DAT(m);

	/* Writing through a read-only segment is a caller bug; warn loudly. */
	WARN_ON(MBULK_SEG_IS_READONLY(m));
	return dat;
}
254
255 /**
256 * get bulk buffer address for read-only
257 *
258 * The address is the buffer address after the headroom in the mbulk segment.
259 * Note that this function can only be used to access the data in the same
260 * segment, including a segment in the mbulk chain (for example, to access
261 * the 802.11 header of A-MSDU).
262 *
263 */
static inline const void *mbulk_dat_r(const struct mbulk *m)
{
	/* Data address after the headroom; read access only. */
	const void *dat = MBULK_SEG_DAT(m);

	return dat;
}
268
269 /**
270 * get bulk buffer address at the offset for read or write access
271 *
272 */
273 static inline void *mbulk_dat_at_rw(const struct mbulk *m, size_t off)
274 {
275 WARN_ON(MBULK_SEG_IS_READONLY(m));
276 return MBULK_SEG_DAT_AT(m, off);
277 }
278
279 /**
280 * get bulk buffer address at the offset for read access
281 *
282 */
static inline /*const*/ void *mbulk_dat_at_r(const struct mbulk *m, size_t off)
{
	/* NOTE(review): the return is logically const, but the qualifier is
	 * commented out — presumably to keep legacy callers compiling; confirm
	 * before restoring it. Callers must not write through this pointer.
	 */
	return (/*const */ void *)MBULK_SEG_DAT_AT(m, off);
}
287
288 /**
289 * get valid data length
290 *
291 */
292 static inline size_t mbulk_tlen(const struct mbulk *m)
293 {
294 return MBULK_SEG_LEN(m);
295 }
296
297 /**
298 * get headroom
299 *
300 */
301 static inline size_t mbulk_headroom(const struct mbulk *m)
302 {
303 return MBULK_SEG_HEADROOM(m);
304 }
305
306 static inline size_t mbulk_tailroom(const struct mbulk *m)
307 {
308 return MBULK_SEG_TAILROOM(m);
309 }
310
311 /**
312 * reserve headroom
313 *
314 * Note this API should be called right after mbulk is created or the valid
315 * data length is zero.
316 *
317 */
318 static inline bool mbulk_reserve_head(struct mbulk *m, size_t headroom)
319 {
320 return mbulk_seg_reserve_head(m, headroom);
321 }
322
323 /**
324 * adjust the valid data range
325 *
326 * headroom would be placed after the signal buffer (or mbuf descriptor if
327 * no in-lined signal), and the valid data length is set to \len.
328 *
329 */
330 static inline bool mbulk_adjust_range(struct mbulk *m, size_t headroom, size_t len)
331 {
332 return mbulk_seg_adjust_range(m, headroom, len);
333 }
334
335 /**
336 * extend the data range at the head
337 *
338 * The headroom would be reduced, and the data range is extended.
339 * To prepend data in the head, the headroom should have been reserved before.
340 *
341 */
342 static inline bool mbulk_prepend_head(struct mbulk *m, size_t more)
343 {
344 return mbulk_seg_prepend_head(m, more);
345 }
346
347 /**
348 * extend the data at the tail
349 *
350 * Data range is expanded towards the tail.
351 *
352 */
353 static inline bool mbulk_append_tail(struct mbulk *m, size_t more)
354 {
355 return mbulk_seg_append_tail(m, more);
356 }
357
358 /**
359 * trim data at the head
360 *
361 * The headroom would be increased, and the valid data range is reduced
362 * accordingly.
363 *
364 */
365 static inline bool mbulk_trim_head(struct mbulk *m, size_t less)
366 {
367 return mbulk_seg_trim_head(m, less);
368 }
369
370 /**
371 * trim data at the tail
372 *
373 * The data length would be reduced.
374 *
375 */
376 static inline bool mbulk_trim_tail(struct mbulk *m, size_t less)
377 {
378 return mbulk_seg_trim_tail(m, less);
379 }
380
381 /**
382 * duplicate a mbulk
383 *
 * There is no data copy, but the reference counter of the original mbulk is
385 * increased by one.
386 *
387 */
static inline struct mbulk *mbulk_duplicate(struct mbulk *m)
{
	/* Shares the buffer; see the refcount note in the comment above. */
	struct mbulk *dup = mbulk_seg_duplicate(m);

	return dup;
}
392
393 /**
394 * clone a mbulk
395 *
396 * New mbulk buffer is created, and contents are copied. The signal is copied
397 * only when \copy_sig is TRUE.
398 *
399 */
400 static inline struct mbulk *mbulk_clone(const struct mbulk *m, enum mbulk_class clas,
401 bool copy_sig)
402 {
403 return mbulk_seg_clone(m, clas, copy_sig);
404 }
405
406 /**
407 * allocate a signal buffer from mbulk pool
408 *
409 */
410 void *msignal_alloc(size_t sig_sz);
411
412 /**
413 * free a signal buffer created from mbulk pool
414 *
415 */
416 void msignal_free(void *sig);
417
418 /**
419 * get mbulk descriptor given a signal buffer address
420 *
421 */
422 struct mbulk *msignal_to_mbulk(void *sig);
423
424 /**
425 * get next chained mbulk in a scatter/gathered list
426 */
427 static inline scsc_mifram_ref mbulk_chain_next(struct mbulk *m)
428 {
429 return MBULK_SEG_CHAIN_NEXT(m);
430 }
431
432 #ifdef MBULK_SUPPORT_SG_CHAIN
433 /**
434 * Scatter/Gather Chained Mbulk APIs
435 * =================================
436 */
437
438 /**
439 * allocate a chained mbulk buffer from a specific mbulk pool
440 *
441 */
442 struct mbulk *mbulk_chain_with_signal_alloc_by_pool(u8 pool_id,
443 enum mbulk_class clas, size_t sig_bufsz, size_t dat_bufsz);
444
445 /**
446 * free a chained mbulk
447 */
448 void mbulk_chain_free(struct mbulk *sg);
449
450 /**
451 * get a tail mbulk in the chain
452 *
453 */
454 struct mbulk *mbulk_chain_tail(struct mbulk *m);
455
456 /**
 * total buffer size in a chained mbulk
458 *
459 */
460 size_t mbulk_chain_bufsz(struct mbulk *m);
461
462 /**
 * total data length in a chained mbulk
464 *
465 */
466 size_t mbulk_chain_tlen(struct mbulk *m);
467
468 /**
469 * get a number of mbulk segments in a chained mbulk
470 */
471 static inline int mbulk_chain_num(const struct mbulk *m)
472 {
473 if (mbulk_is_sg(m)) {
474 int n = 0;
475
476 while (m != NULL) {
477 n++;
478 m = m->chain_next;
479 }
480 return n;
481 }
482 return 1;
483 }
484
485 /* NOT IMPLEMENTED YET. */
486 void *mbulk_chain_access(struct mbulk *m, size_t off, char *local_buf, size_t local_bufsz);
487 void *mbulk_chain_writeback(struct mbulk *m, size_t off, char *local_buf, size_t local_bufsz);
488 void *mbulk_chain_copy_from(struct mbulk *m, size_t off, char *buf, int len);
489 void *mbulk_chain_copy_to(struct mbulk *m, size_t off, char *buf, int len);
490 #endif /*MBULK_SUPPORT_SG_CHAIN*/
491
492 /**
493 * init mbulk library
494 */
495 /*extern void init_mbulk(void);*/
496 void init_mbulk(void *mem, size_t pool_size);
497
498 /**
499 * add a memory zone to a mbulk pool list
500 *
501 */
502 #ifdef CONFIG_SCSC_WLAN_DEBUG
503 int mbulk_pool_add(u8 pool_id, char *base, char *end, size_t seg_size, u8 guard, int minor);
504 #else
505 int mbulk_pool_add(u8 pool_id, char *base, char *end, size_t buf_size, u8 guard);
506 #endif
507 /**
508 * check sanity of a mbulk pool
509 */
510 void mbulk_pool_check_sanity(u8 pool_id);
511
512 /**
513 * configure the handler which returning the buffer to the host
514 */
515 void mbulk_set_handler_return_host_mbulk(void (*free_host_buf)(struct mbulk *m));
516
517 /**
518 * free a mbulk in the virtual host
519 */
520 void mbulk_free_virt_host(struct mbulk *m);
521 void mbulk_pool_dump(u8 pool_id, int max_cnt);
522
523 #endif /*__MBULK_H__*/