source: G950FXXS5DSI1
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] drivers/net/wireless/bcmdhd4361/linux_pkt.c
/*
 * Linux Packet (skb) interface
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_pkt.c 800754 2019-01-23 08:38:54Z $
 */

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <pcicfg.h>

#include <linux/fs.h>
#include "linux_osl_priv.h"

#ifdef CONFIG_DHD_USE_STATIC_BUF

bcm_static_buf_t *bcm_static_buf = 0;
bcm_static_pkt_t *bcm_static_skb = 0;

void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */

#ifdef BCM_OBJECT_TRACE
/* don't clear the first 4 bytes; they hold the pkt sn */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	uint tagsz = sizeof(s->cb); \
	ASSERT(OSL_PKTTAG_SZ <= tagsz); \
	memset(s->cb + 4, 0, tagsz - 4); \
} while (0)
#else
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	uint tagsz = sizeof(s->cb); \
	ASSERT(OSL_PKTTAG_SZ <= tagsz); \
	memset(s->cb, 0, tagsz); \
} while (0)
#endif /* BCM_OBJECT_TRACE */

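/*
 * Wire the OSL up to the platform's preallocated WiFi memory. Prealloc
 * section 3 is used for the static buffer pool and section 4 for the table of
 * preallocated skbs (the section numbers follow the wifi_platform_prealloc()
 * convention assumed here). Returns -ENOMEM when no preallocated region is
 * available.
 */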
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("can not alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		} else {
			printk("succeed to alloc static buf\n");
		}

		spin_lock_init(&bcm_static_buf->static_lock);

		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
	}

#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
	if (!bcm_static_skb && adapter) {
		int i;
		void *skb_buff_ptr = 0;
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
		if (!skb_buff_ptr) {
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			bcm_static_skb->pkt_use[i] = 0;
		}

#ifdef DHD_USE_STATIC_CTRLBUF
		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
		bcm_static_skb->last_allocated_index = 0;
#else
		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
	}
#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	return 0;
}

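/*
 * Drop the driver's references to the preallocated regions. The memory itself
 * remains owned by the platform prealloc code and is not freed here.
 */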
int osl_static_mem_deinit(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
#ifdef BCMSDIO
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif /* BCMSDIO */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	return 0;
}

/*
 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
 * DDR aliasing into non-ACP address space. Such Fwder buffers must be
 * explicitly managed from a coherency perspective.
 */
static inline void BCMFASTPATH
osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
{
}

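/*
 * Allocate an skb with GFP flags chosen for the calling context: GFP_ATOMIC in
 * atomic or IRQ-disabled context (or always, when DHD_USE_ATOMIC_PKTGET is
 * defined), GFP_KERNEL otherwise.
 */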
static struct sk_buff * BCMFASTPATH
osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#ifdef DHD_USE_ATOMIC_PKTGET
	flags = GFP_ATOMIC;
#endif /* DHD_USE_ATOMIC_PKTGET */
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */

	return skb;
}

/* Convert a driver packet to a native (OS) packet.
 * In the process, the packettag is zeroed out before sending up.
 * The IP code depends on skb->cb being set up correctly with various options,
 * which in our case means it should be 0.
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
	struct sk_buff *nskb;

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Decrement the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
	}
	return (struct sk_buff *)pkt;
}

/* Convert a native (OS) packet to a driver packet.
 * In the process, the native packet is destroyed; there is no copying.
 * The packettag is also zeroed out.
 */
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
{
	struct sk_buff *cskb;
	struct sk_buff *nskb;
	unsigned long pktalloced = 0;

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* walk the PKTCLINK() list */
	for (cskb = (struct sk_buff *)pkt;
	     cskb != NULL;
	     cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {

		/* walk the pkt buffer list */
		for (nskb = cskb; nskb; nskb = nskb->next) {

			/* Increment the packet counter */
			pktalloced++;

			/* Clear the 'prev' pointer.
			 * Kernel 3.18 leaves skb->prev set to the skb itself
			 * to indicate a non-fragmented skb.
			 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
			nskb->prev = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */

		}
	}

	/* Increment the packet counter */
	atomic_add(pktalloced, &osh->cmn->pktalloced);

	return (void *)pkt;
}

/* Return a new packet. Zero out the pkttag. */
#ifdef BCM_OBJECT_TRACE
void * BCMFASTPATH
linux_pktget(osl_t *osh, uint len, int line, const char *caller)
#else
void * BCMFASTPATH
linux_pktget(osl_t *osh, uint len)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb;
	uchar num = 0;
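	/*
	 * lmtest appears to act as a fault-injection knob: when it is nonzero,
	 * roughly lmtest percent of allocations are forced to fail by drawing
	 * a random byte and returning NULL.
	 */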
	if (lmtest != FALSE) {
		get_random_bytes(&num, sizeof(uchar));
		if ((num + 1) <= (256 * lmtest / 100))
			return NULL;
	}

	if ((skb = osl_alloc_skb(osh, len))) {
		skb->tail += len;
		skb->len += len;
		skb->priority = 0;

		atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */
	}

	return ((void*) skb);
}

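/*
 * Illustrative usage sketch (assuming the PKTGET()/PKTDATA()/PKTFREE() macros
 * that osl.h conventionally maps onto these routines):
 *
 *	void *p = PKTGET(osh, pktlen, TRUE);
 *	if (p != NULL) {
 *		memcpy(PKTDATA(osh, p), payload, pktlen);
 *		...hand the packet to the bus layer, or release it...
 *		PKTFREE(osh, p, TRUE);
 *	}
 */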
/* Free the driver packet. Free the tag if present */
#ifdef BCM_OBJECT_TRACE
void BCMFASTPATH
linux_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
void BCMFASTPATH
linux_pktfree(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	if (send) {
		if (osh->pub.tx_fn) {
			osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
		}
	} else {
		if (osh->pub.rx_fn) {
			osh->pub.rx_fn(osh->pub.rx_ctx, p);
		}
	}

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}

	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in used\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

		{
			if (skb->destructor) {
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			} else {
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
			}
		}
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
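/*
 * Hand out a packet from the preallocated pool when possible. Requests larger
 * than DHD_SKB_MAX_BUFSIZE, or made before the pool exists, fall back to
 * linux_pktget(). With DHD_USE_STATIC_CTRLBUF the pool is a ring of skb_8k
 * control buffers and exhaustion returns NULL; otherwise the skb_4k/skb_8k
 * (and, with ENHANCED_STATIC_BUF, skb_16k) slots are searched and exhaustion
 * falls back to a dynamic allocation.
 */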
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;
#ifdef DHD_USE_STATIC_CTRLBUF
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!bcm_static_skb)
		return linux_pktget(osh, len);

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return linux_pktget(osh, len);
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		uint32 index;
		for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
			index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
			bcm_static_skb->last_allocated_index++;
			if (bcm_static_skb->skb_8k[index] &&
				bcm_static_skb->pkt_use[index] == 0) {
				break;
			}
		}

		if (i < STATIC_PKT_2PAGE_NUM) {
			bcm_static_skb->pkt_use[index] = 1;
			skb = bcm_static_skb->skb_8k[index];
			skb->data = skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT);
#else
			skb->tail = skb->data + PKT_HEADROOM_DEFAULT;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->data += PKT_HEADROOM_DEFAULT;
			skb->cloned = 0;
			skb->priority = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;
			skb->mac_len = PREALLOC_USED_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return skb;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return NULL;
#else
	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
			if (bcm_static_skb->skb_4k[i] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if (i != STATIC_PKT_1PAGE_NUM) {
			bcm_static_skb->pkt_use[i] = 1;

			skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
			if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	if (bcm_static_skb->skb_16k &&
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;

		skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb_set_tail_pointer(skb, len);
#else
		skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
		skb->len = len;

		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif /* ENHANCED_STATIC_BUF */

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return linux_pktget(osh, len);
#endif /* DHD_USE_STATIC_CTRLBUF */
}

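/*
 * Return a packet to the preallocated pool by matching its pointer against
 * the pool entries; packets that did not come from the pool are released via
 * linux_pktfree().
 */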
void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb = (struct sk_buff *)p;
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!p) {
		return;
	}

	if (!bcm_static_skb) {
		linux_pktfree(osh, p, send);
		return;
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			if (bcm_static_skb->pkt_use[i] == 0) {
				printk("%s: static pkt idx %d(%p) is double free\n",
					__FUNCTION__, i, p);
			} else {
				bcm_static_skb->pkt_use[i] = 0;
			}

			if (skb->mac_len != PREALLOC_USED_MAGIC) {
				printk("%s: static pkt idx %d(%p) is not in used\n",
					__FUNCTION__, i, p);
			}

			skb->mac_len = PREALLOC_FREE_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
	down(&bcm_static_skb->osl_pkt_sem);
	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif /* ENHANCED_STATIC_BUF */
	up(&bcm_static_skb->osl_pkt_sem);
#endif /* DHD_USE_STATIC_CTRLBUF */
	linux_pktfree(osh, p, send);
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */

/* Clone a packet.
 * The pkttag contents are NOT cloned.
 */
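/*
 * Note that the clone produced by skb_clone() shares its data buffer with the
 * original; only the skb metadata is duplicated. The clone is tracked as a
 * packet in its own right, so pktalloced is incremented for it.
 */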
#ifdef BCM_OBJECT_TRACE
void *
osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
#else
void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCM_OBJECT_TRACE */
{
	void *p;

	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
		return NULL;

	/* skb_clone copies skb->cb; we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
	bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */

	return (p);
}

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

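/*
 * Report the number of outstanding packets. The counter lives in state shared
 * between OSL instances (osh->cmn), so the value is only returned when this is
 * the sole reference holder; otherwise 0 is reported.
 */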
uint
osl_pktalloced(osl_t *osh)
{
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->pktalloced));
	else
		return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
#include <linux/kallsyms.h>
#include <net/sock.h>
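/*
 * Partially orphan a TCP skb so that TCP small-queue (TSQ) accounting does not
 * throttle the socket while packets are held by the WiFi driver. tcp_wfree is
 * not exported, so the destructor is identified by name via sprint_symbol()
 * and cached on first use; the comparison looks only at the first 9
 * characters ("tcp_wfree").
 */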
void
osl_pkt_orphan_partial(struct sk_buff *skb)
{
	uint32 fraction;
	static void *p_tcp_wfree = NULL;

	if (!skb->destructor || skb->destructor == sock_wfree)
		return;

	if (unlikely(!p_tcp_wfree)) {
		char sym[KSYM_SYMBOL_LEN];
		sprint_symbol(sym, (unsigned long)skb->destructor);
		sym[9] = 0;
		if (!strcmp(sym, "tcp_wfree"))
			p_tcp_wfree = skb->destructor;
		else
			return;
	}

	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
		return;

	/* Subtract a portion of the skb truesize from the socket's
	 * sk_wmem_alloc so that more skbs can be allocated for this socket,
	 * giving a better cushion to meet the WiFi device's requirements.
	 */
	fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
	skb->truesize -= fraction;
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
}
#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */