2 * Linux Packet (skb) interface
4 * Copyright (C) 1999-2019, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Open:>>
27 * $Id: linux_pkt.c 769682 2018-06-27 07:29:55Z $
31 #include <bcmendian.h>
35 #include <linux/random.h>
40 #include <dngl_stats.h>
44 #include "linux_osl_priv.h"
/* NOTE(review): this chunk is a garbled extraction -- original source line
 * numbers are fused into the text and many lines are elided.  Code bytes
 * are preserved verbatim below; only comments are added.
 */
/* Driver-wide singleton pools used when CONFIG_DHD_USE_STATIC_BUF is set:
 * bcm_static_buf is a preallocated memory region and bcm_static_skb a
 * preallocated skb pool.  Both start NULL and are attached lazily by
 * osl_static_mem_init().
 */
46 #ifdef CONFIG_DHD_USE_STATIC_BUF
48 bcm_static_buf_t
*bcm_static_buf
= 0;
49 bcm_static_pkt_t
*bcm_static_skb
= 0;
/* Platform hook returning a preallocated region for the given section;
 * presumably implemented by the platform glue layer -- TODO confirm.
 */
51 void* wifi_platform_prealloc(void *adapter
, int section
, unsigned long size
);
52 #endif /* CONFIG_DHD_USE_STATIC_BUF */
/* OSL_PKTTAG_CLEAR(p): zero the skb control buffer (s->cb) that serves as
 * the OSL packet tag.  With BCM_OBJECT_TRACE the first 4 bytes hold the
 * packet serial number and are preserved; otherwise the entire cb area is
 * cleared.  An #else and the do/while braces separating the two variants
 * were presumably elided by the extraction -- the code bytes below are
 * kept exactly as found.
 */
54 #ifdef BCM_OBJECT_TRACE
55 /* don't clear the first 4 byte that is the pkt sn */
56 #define OSL_PKTTAG_CLEAR(p) \
58 struct sk_buff *s = (struct sk_buff *)(p); \
59 uint tagsz = sizeof(s->cb); \
60 ASSERT(OSL_PKTTAG_SZ <= tagsz); \
61 memset(s->cb + 4, 0, tagsz - 4); \
64 #define OSL_PKTTAG_CLEAR(p) \
66 struct sk_buff *s = (struct sk_buff *)(p); \
67 uint tagsz = sizeof(s->cb); \
68 ASSERT(OSL_PKTTAG_SZ <= tagsz); \
69 memset(s->cb, 0, tagsz); \
71 #endif /* BCM_OBJECT_TRACE */
/* osl_static_mem_init: lazily attach the platform-preallocated static
 * buffer (DHD_PREALLOC_OSL_BUF) and, for SDIO / static-ctrlbuf builds, the
 * static skb pool (DHD_PREALLOC_SKB_BUF).  Returns an int status; the
 * return statements and several body lines were elided by the extraction,
 * so the control flow shown here is partial.
 */
73 int osl_static_mem_init(osl_t
*osh
, void *adapter
)
75 #ifdef CONFIG_DHD_USE_STATIC_BUF
/* First caller with a valid adapter populates bcm_static_buf. */
76 if (!bcm_static_buf
&& adapter
) {
77 if (!(bcm_static_buf
= (bcm_static_buf_t
*)wifi_platform_prealloc(adapter
,
78 DHD_PREALLOC_OSL_BUF
, STATIC_BUF_SIZE
+ STATIC_BUF_TOTAL_LEN
))) {
79 printk("can not alloc static buf!\n");
80 bcm_static_skb
= NULL
;
81 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
84 printk("succeed to alloc static buf\n");
87 spin_lock_init(&bcm_static_buf
->static_lock
);
/* Payload area begins STATIC_BUF_SIZE bytes past the header. */
89 bcm_static_buf
->buf_ptr
= (unsigned char *)bcm_static_buf
+ STATIC_BUF_SIZE
;
92 #if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
/* Skb pool bookkeeping lives 2048 bytes into the static buffer --
 * presumably a fixed reserved slot; TODO confirm against the full source.
 */
93 if (!bcm_static_skb
&& adapter
) {
95 void *skb_buff_ptr
= 0;
96 bcm_static_skb
= (bcm_static_pkt_t
*)((char *)bcm_static_buf
+ 2048);
97 skb_buff_ptr
= wifi_platform_prealloc(adapter
, DHD_PREALLOC_SKB_BUF
, 0);
99 printk("cannot alloc static buf!\n");
100 bcm_static_buf
= NULL
;
101 bcm_static_skb
= NULL
;
102 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
/* Copy the preallocated skb pointer table, then mark all slots free. */
106 bcopy(skb_buff_ptr
, bcm_static_skb
, sizeof(struct sk_buff
*) *
107 (STATIC_PKT_MAX_NUM
));
108 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
109 bcm_static_skb
->pkt_use
[i
] = 0;
/* Ctrlbuf builds use a spinlock + round-robin index; others a semaphore. */
112 #ifdef DHD_USE_STATIC_CTRLBUF
113 spin_lock_init(&bcm_static_skb
->osl_pkt_lock
);
114 bcm_static_skb
->last_allocated_index
= 0;
116 sema_init(&bcm_static_skb
->osl_pkt_sem
, 1);
117 #endif /* DHD_USE_STATIC_CTRLBUF */
119 #endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
120 #endif /* CONFIG_DHD_USE_STATIC_BUF */
/* osl_static_mem_deinit: counterpart of osl_static_mem_init.  The bodies
 * of both conditionals (presumably resetting bcm_static_buf /
 * bcm_static_skb) and the return were elided by the extraction.
 */
125 int osl_static_mem_deinit(osl_t
*osh
, void *adapter
)
127 #ifdef CONFIG_DHD_USE_STATIC_BUF
128 if (bcm_static_buf
) {
132 if (bcm_static_skb
) {
136 #endif /* CONFIG_DHD_USE_STATIC_BUF */
/* osl_fwderbuf_reset: coherency reset for forwarder buffers (see the
 * original comment below).  The entire function body was elided by the
 * extraction; only the signature survives.
 */
141 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
142 * DDR aliasing into non-ACP address space. Such Fwder buffers must be
143 * explicitly managed from a coherency perspective.
145 static inline void BCMFASTPATH
146 osl_fwderbuf_reset(osl_t
*osh
, struct sk_buff
*skb
)
/* osl_alloc_skb: allocate an skb of 'len' bytes.  On kernels >= 2.6.25 it
 * picks GFP_ATOMIC when in atomic context or with IRQs disabled (else
 * GFP_KERNEL) and calls __dev_alloc_skb(); older kernels fall back to
 * dev_alloc_skb().  The #else lines and the DHD_USE_ATOMIC_PKTGET branch
 * body were elided by the extraction.
 */
150 static struct sk_buff
* BCMFASTPATH
151 osl_alloc_skb(osl_t
*osh
, unsigned int len
)
154 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
155 gfp_t flags
= (in_atomic() || irqs_disabled()) ? GFP_ATOMIC
: GFP_KERNEL
;
156 #ifdef DHD_USE_ATOMIC_PKTGET
158 #endif /* DHD_USE_ATOMIC_PKTGET */
159 skb
= __dev_alloc_skb(len
, flags
);
161 skb
= dev_alloc_skb(len
);
162 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
/* osl_pkt_tonative: hand a driver packet up as a native skb.  Clears the
 * pkttag (skb->cb must be zero for the IP stack) and decrements the OSL
 * packet counter once per chain (PKTCCNT for chained packets, 1 otherwise).
 */
167 /* Convert a driver packet to native(OS) packet
168 * In the process, packettag is zeroed out before sending up
169 * IP code depends on skb->cb to be setup correctly with various options
170 * In our case, that means it should be 0
172 struct sk_buff
* BCMFASTPATH
173 osl_pkt_tonative(osl_t
*osh
, void *pkt
)
175 struct sk_buff
*nskb
;
178 OSL_PKTTAG_CLEAR(pkt
);
180 /* Decrement the packet counter */
/* Walk the skb->next buffer list, adjusting the shared allocation count. */
181 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
182 atomic_sub(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->cmn
->pktalloced
);
185 return (struct sk_buff
*)pkt
;
/* osl_pkt_frmnative: adopt a native skb as a driver packet.  Clears the
 * pkttag, walks the PKTCLINK chain and each skb->next list (counting
 * buffers into 'pktalloced'), clears skb->prev where kernel >= 3.18 sets
 * it to self, then adds the total to the OSL packet counter.  Several
 * inner-loop statements and the return were elided by the extraction.
 */
188 /* Convert a native(OS) packet to driver packet.
189 * In the process, native packet is destroyed, there is no copying
190 * Also, a packettag is zeroed out
193 osl_pkt_frmnative(osl_t
*osh
, void *pkt
)
195 struct sk_buff
*cskb
;
196 struct sk_buff
*nskb
;
197 unsigned long pktalloced
= 0;
200 OSL_PKTTAG_CLEAR(pkt
);
202 /* walk the PKTCLINK() list */
203 for (cskb
= (struct sk_buff
*)pkt
;
205 cskb
= PKTISCHAINED(cskb
) ? PKTCLINK(cskb
) : NULL
) {
207 /* walk the pkt buffer list */
208 for (nskb
= cskb
; nskb
; nskb
= nskb
->next
) {
210 /* Increment the packet counter */
213 /* clean the 'prev' pointer
214 * Kernel 3.18 is leaving skb->prev pointer set to skb
215 * to indicate a non-fragmented skb
217 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
219 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
224 /* Increment the packet counter */
225 atomic_add(pktalloced
, &osh
->cmn
->pktalloced
);
/* linux_pktget: allocate a new packet of 'len' bytes via osl_alloc_skb()
 * and bump the OSL packet counter.  The lmtest branch injects random
 * allocation failures for low-memory testing (probability lmtest/100 --
 * presumably returning NULL; that line was elided).  With
 * BCM_OBJECT_TRACE the caller/line are recorded for leak tracking.
 */
230 /* Return a new packet. zero out pkttag */
232 #ifdef BCM_OBJECT_TRACE
233 linux_pktget(osl_t
*osh
, uint len
, int line
, const char *caller
)
235 linux_pktget(osl_t
*osh
, uint len
)
236 #endif /* BCM_OBJECT_TRACE */
/* Simulated low-memory failure path (test hook). */
240 if (lmtest
!= FALSE
) {
241 get_random_bytes(&num
, sizeof(uchar
));
242 if ((num
+ 1) <= (256 * lmtest
/ 100))
246 if ((skb
= osl_alloc_skb(osh
, len
))) {
251 atomic_inc(&osh
->cmn
->pktalloced
);
252 #ifdef BCM_OBJECT_TRACE
253 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, caller
, line
);
254 #endif /* BCM_OBJECT_TRACE */
257 return ((void*) skb
);
/* linux_pktfree: free a driver packet (and, per the chain comment below,
 * every skb linked via skb->next -- the loop itself was elided by the
 * extraction).  Routes the packet through the registered tx_fn/rx_fn
 * interceptors first, refuses skbs still carrying static-pool magic
 * markers, then frees via dev_kfree_skb_any() when a destructor is set
 * (safe in hard IRQ) and decrements the OSL packet counter.
 */
260 /* Free the driver packet. Free the tag if present */
262 #ifdef BCM_OBJECT_TRACE
263 linux_pktfree(osl_t
*osh
, void *p
, bool send
, int line
, const char *caller
)
265 linux_pktfree(osl_t
*osh
, void *p
, bool send
)
266 #endif /* BCM_OBJECT_TRACE */
268 struct sk_buff
*skb
, *nskb
;
272 skb
= (struct sk_buff
*) p
;
/* tx_fn is invoked for sends, rx_fn otherwise -- the send/else structure
 * is presumably between these ifs in the full source; TODO confirm.
 */
275 if (osh
->pub
.tx_fn
) {
276 osh
->pub
.tx_fn(osh
->pub
.tx_ctx
, p
, 0);
279 if (osh
->pub
.rx_fn
) {
280 osh
->pub
.rx_fn(osh
->pub
.rx_ctx
, p
);
284 PKTDBG_TRACE(osh
, (void *) skb
, PKTLIST_PKTFREE
);
/* Guard: packets from the static pool must go through osl_pktfree_static,
 * not the normal free path; mac_len carries the pool-state magic.
 */
286 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
287 if (skb
&& (skb
->mac_len
== PREALLOC_USED_MAGIC
)) {
288 printk("%s: pkt %p is from static pool\n",
294 if (skb
&& (skb
->mac_len
== PREALLOC_FREE_MAGIC
)) {
295 printk("%s: pkt %p is from static pool and not in used\n",
300 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
302 /* perversion: we use skb->next to chain multi-skb packets */
307 #ifdef BCM_OBJECT_TRACE
308 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, caller
, line
);
309 #endif /* BCM_OBJECT_TRACE */
312 if (skb
->destructor
) {
313 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
316 dev_kfree_skb_any(skb
);
318 /* can free immediately (even in_irq()) if destructor
324 atomic_dec(&osh
->cmn
->pktalloced
);
/* osl_pktget_static: allocate a packet from the preallocated static pool,
 * falling back to linux_pktget() when the pool is absent, the request
 * exceeds DHD_SKB_MAX_BUFSIZE, or (ctrlbuf builds) the pool is exhausted.
 * Two variants:
 *  - DHD_USE_STATIC_CTRLBUF: spinlock-protected round-robin scan of the
 *    8k (2-page) slots, marking the winner with PREALLOC_USED_MAGIC in
 *    skb->mac_len.
 *  - otherwise: semaphore-protected first-fit scan of 4k slots, then 8k
 *    slots, then the single 16k slot (ENHANCED_STATIC_BUF).
 * Loop-body break/return lines were elided by the extraction, so the
 * post-loop index checks are the surviving evidence of the match logic.
 */
329 #ifdef CONFIG_DHD_USE_STATIC_BUF
331 osl_pktget_static(osl_t
*osh
, uint len
)
335 #ifdef DHD_USE_STATIC_CTRLBUF
337 #endif /* DHD_USE_STATIC_CTRLBUF */
340 return linux_pktget(osh
, len
);
/* Oversized requests cannot come from the pool -- log and fall back. */
342 if (len
> DHD_SKB_MAX_BUFSIZE
) {
343 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__
, len
);
344 return linux_pktget(osh
, len
);
347 #ifdef DHD_USE_STATIC_CTRLBUF
348 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
350 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
/* Round-robin over the 8k slots starting after the last allocation. */
352 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
353 index
= bcm_static_skb
->last_allocated_index
% STATIC_PKT_2PAGE_NUM
;
354 bcm_static_skb
->last_allocated_index
++;
355 if (bcm_static_skb
->skb_8k
[index
] &&
356 bcm_static_skb
->pkt_use
[index
] == 0) {
361 if (i
< STATIC_PKT_2PAGE_NUM
) {
362 bcm_static_skb
->pkt_use
[index
] = 1;
363 skb
= bcm_static_skb
->skb_8k
[index
];
/* Reset data/tail to the buffer start plus default headroom before
 * sizing the skb to 'len'.
 */
364 skb
->data
= skb
->head
;
365 #ifdef NET_SKBUFF_DATA_USES_OFFSET
366 skb_set_tail_pointer(skb
, PKT_HEADROOM_DEFAULT
);
368 skb
->tail
= skb
->data
+ PKT_HEADROOM_DEFAULT
;
369 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
370 skb
->data
+= PKT_HEADROOM_DEFAULT
;
373 #ifdef NET_SKBUFF_DATA_USES_OFFSET
374 skb_set_tail_pointer(skb
, len
);
376 skb
->tail
= skb
->data
+ len
;
377 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
/* Tag as in-use so linux_pktfree() can detect a wrong-path free. */
379 skb
->mac_len
= PREALLOC_USED_MAGIC
;
380 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
385 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
386 printk("%s: all static pkt in use!\n", __FUNCTION__
);
/* Non-ctrlbuf variant: semaphore-serialized first-fit allocation. */
389 down(&bcm_static_skb
->osl_pkt_sem
);
391 if (len
<= DHD_SKB_1PAGE_BUFSIZE
) {
392 for (i
= 0; i
< STATIC_PKT_1PAGE_NUM
; i
++) {
393 if (bcm_static_skb
->skb_4k
[i
] &&
394 bcm_static_skb
->pkt_use
[i
] == 0) {
399 if (i
!= STATIC_PKT_1PAGE_NUM
) {
400 bcm_static_skb
->pkt_use
[i
] = 1;
402 skb
= bcm_static_skb
->skb_4k
[i
];
403 #ifdef NET_SKBUFF_DATA_USES_OFFSET
404 skb_set_tail_pointer(skb
, len
);
406 skb
->tail
= skb
->data
+ len
;
407 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
410 up(&bcm_static_skb
->osl_pkt_sem
);
/* 2-page slots: pkt_use index is offset by STATIC_PKT_1PAGE_NUM while
 * the skb_8k array itself is indexed from 0.
 */
415 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
416 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
417 if (bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
] &&
418 bcm_static_skb
->pkt_use
[i
] == 0) {
423 if ((i
>= STATIC_PKT_1PAGE_NUM
) && (i
< STATIC_PKT_1_2PAGE_NUM
)) {
424 bcm_static_skb
->pkt_use
[i
] = 1;
425 skb
= bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
];
426 #ifdef NET_SKBUFF_DATA_USES_OFFSET
427 skb_set_tail_pointer(skb
, len
);
429 skb
->tail
= skb
->data
+ len
;
430 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
433 up(&bcm_static_skb
->osl_pkt_sem
);
/* Single 16k slot; it owns the last pkt_use entry. */
438 #if defined(ENHANCED_STATIC_BUF)
439 if (bcm_static_skb
->skb_16k
&&
440 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] == 0) {
441 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 1;
443 skb
= bcm_static_skb
->skb_16k
;
444 #ifdef NET_SKBUFF_DATA_USES_OFFSET
445 skb_set_tail_pointer(skb
, len
);
447 skb
->tail
= skb
->data
+ len
;
448 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
451 up(&bcm_static_skb
->osl_pkt_sem
);
454 #endif /* ENHANCED_STATIC_BUF */
/* Pool exhausted: release and fall back to dynamic allocation. */
456 up(&bcm_static_skb
->osl_pkt_sem
);
457 printk("%s: all static pkt in use!\n", __FUNCTION__
);
458 return linux_pktget(osh
, len
);
459 #endif /* DHD_USE_STATIC_CTRLBUF */
/* osl_pktfree_static: return a packet to the static pool.  If the pool is
 * not initialized the packet is handed to linux_pktfree().  Ctrlbuf
 * builds search the 8k slots under the spinlock, detect double frees and
 * wrong-state magics via skb->mac_len, and re-mark the slot free;
 * otherwise the 4k, 8k and (ENHANCED_STATIC_BUF) 16k slots are searched
 * under the semaphore.  A packet not found in any slot falls through to
 * linux_pktfree().  Several return/continue lines were elided by the
 * extraction.
 */
463 osl_pktfree_static(osl_t
*osh
, void *p
, bool send
)
466 #ifdef DHD_USE_STATIC_CTRLBUF
467 struct sk_buff
*skb
= (struct sk_buff
*)p
;
469 #endif /* DHD_USE_STATIC_CTRLBUF */
475 if (!bcm_static_skb
) {
476 linux_pktfree(osh
, p
, send
);
480 #ifdef DHD_USE_STATIC_CTRLBUF
481 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
483 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
484 if (p
== bcm_static_skb
->skb_8k
[i
]) {
/* Slot already free => this is a double free; log it. */
485 if (bcm_static_skb
->pkt_use
[i
] == 0) {
486 printk("%s: static pkt idx %d(%p) is double free\n",
489 bcm_static_skb
->pkt_use
[i
] = 0;
/* mac_len must still carry the in-use magic set at allocation. */
492 if (skb
->mac_len
!= PREALLOC_USED_MAGIC
) {
493 printk("%s: static pkt idx %d(%p) is not in used\n",
497 skb
->mac_len
= PREALLOC_FREE_MAGIC
;
498 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
503 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
504 printk("%s: packet %p does not exist in the pool\n", __FUNCTION__
, p
);
/* Non-ctrlbuf variant: semaphore-serialized slot search. */
506 down(&bcm_static_skb
->osl_pkt_sem
);
507 for (i
= 0; i
< STATIC_PKT_1PAGE_NUM
; i
++) {
508 if (p
== bcm_static_skb
->skb_4k
[i
]) {
509 bcm_static_skb
->pkt_use
[i
] = 0;
510 up(&bcm_static_skb
->osl_pkt_sem
);
515 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
516 if (p
== bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
]) {
517 bcm_static_skb
->pkt_use
[i
] = 0;
518 up(&bcm_static_skb
->osl_pkt_sem
);
522 #ifdef ENHANCED_STATIC_BUF
523 if (p
== bcm_static_skb
->skb_16k
) {
524 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 0;
525 up(&bcm_static_skb
->osl_pkt_sem
);
529 up(&bcm_static_skb
->osl_pkt_sem
);
530 #endif /* DHD_USE_STATIC_CTRLBUF */
/* Not a pool packet -- free it through the normal path. */
531 linux_pktfree(osh
, p
, send
);
533 #endif /* CONFIG_DHD_USE_STATIC_BUF */
/* osl_pktdup: clone a (non-chained) packet via skb_clone(GFP_ATOMIC).
 * The clone shares data with the original; skb->cb (the pkttag) copied by
 * skb_clone is presumably cleared right after -- that line was elided by
 * the extraction.  Increments the OSL packet counter and records the
 * clone for object tracing when enabled.
 */
536 * The pkttag contents are NOT cloned.
539 #ifdef BCM_OBJECT_TRACE
540 osl_pktdup(osl_t
*osh
, void *skb
, int line
, const char *caller
)
542 osl_pktdup(osl_t
*osh
, void *skb
)
543 #endif /* BCM_OBJECT_TRACE */
547 ASSERT(!PKTISCHAINED(skb
));
549 /* clear the CTFBUF flag if set and map the rest of the buffer
554 if ((p
= skb_clone((struct sk_buff
*)skb
, GFP_ATOMIC
)) == NULL
)
557 /* skb_clone copies skb->cb.. we don't want that */
561 /* Increment the packet counter */
562 atomic_inc(&osh
->cmn
->pktalloced
);
563 #ifdef BCM_OBJECT_TRACE
564 bcm_object_trace_opr(p
, BCM_OBJDBG_ADD_PKT
, caller
, line
);
565 #endif /* BCM_OBJECT_TRACE */
/* osl_pktalloced: report the number of outstanding OSL packets, but only
 * when this osh is the sole holder of the shared state (refcount == 1);
 * the alternative return for the shared case was elided by the extraction.
 */
571 * BINOSL selects the slightly slower function-call-based binary compatible osl.
575 osl_pktalloced(osl_t
*osh
)
577 if (atomic_read(&osh
->cmn
->refcount
) == 1)
578 return (atomic_read(&osh
->cmn
->pktalloced
));
583 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
584 #include <linux/kallsyms.h>
585 #include <net/sock.h>
587 osl_pkt_orphan_partial(struct sk_buff
*skb
, int tsq
)
590 static void *p_tcp_wfree
= NULL
;
595 if (!skb
->destructor
|| skb
->destructor
== sock_wfree
)
598 if (unlikely(!p_tcp_wfree
)) {
599 char sym
[KSYM_SYMBOL_LEN
];
600 sprint_symbol(sym
, (unsigned long)skb
->destructor
);
602 if (!strcmp(sym
, "tcp_wfree"))
603 p_tcp_wfree
= skb
->destructor
;
608 if (unlikely(skb
->destructor
!= p_tcp_wfree
|| !skb
->sk
))
611 /* abstract a certain portion of skb truesize from the socket
612 * sk_wmem_alloc to allow more skb can be allocated for this
613 * socket for better cusion meeting WiFi device requirement
615 fraction
= skb
->truesize
* (tsq
- 1) / tsq
;
616 skb
->truesize
-= fraction
;
618 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
619 atomic_sub(fraction
, &skb
->sk
->sk_wmem_alloc
.refs
);
621 atomic_sub(fraction
, &skb
->sk
->sk_wmem_alloc
);
625 #endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */