2 * Linux Packet (skb) interface
4 * Copyright (C) 1999-2019, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Open:>>
27 * $Id: linux_pkt.c 800754 2019-01-23 08:38:54Z $
31 #include <bcmendian.h>
35 #include <linux/random.h>
42 #include "linux_osl_priv.h"
/*
 * Static (platform-preallocated) buffer support.
 * bcm_static_buf / bcm_static_skb point at pools handed over by the
 * platform layer via wifi_platform_prealloc(); both start out NULL and
 * are populated in osl_static_mem_init().
 */
44 #ifdef CONFIG_DHD_USE_STATIC_BUF
46 bcm_static_buf_t
*bcm_static_buf
= 0;
47 bcm_static_pkt_t
*bcm_static_skb
= 0;
/* Supplied by the platform glue; returns the preallocated region for 'section'. */
49 void* wifi_platform_prealloc(void *adapter
, int section
, unsigned long size
);
50 #endif /* CONFIG_DHD_USE_STATIC_BUF */
/*
 * OSL_PKTTAG_CLEAR(p): zero the OSL packet tag stored in skb->cb.
 * With BCM_OBJECT_TRACE the first 4 bytes (the packet serial number) are
 * preserved and only cb[4..] is cleared; otherwise the whole cb[] area is
 * cleared. Both variants assert the tag fits inside cb.
 * NOTE(review): the do/while(0) wrapper lines and the #else separating the
 * two variants appear to be elided in this extraction - code kept verbatim.
 */
52 #ifdef BCM_OBJECT_TRACE
53 /* don't clear the first 4 byte that is the pkt sn */
54 #define OSL_PKTTAG_CLEAR(p) \
56 struct sk_buff *s = (struct sk_buff *)(p); \
57 uint tagsz = sizeof(s->cb); \
58 ASSERT(OSL_PKTTAG_SZ <= tagsz); \
59 memset(s->cb + 4, 0, tagsz - 4); \
62 #define OSL_PKTTAG_CLEAR(p) \
64 struct sk_buff *s = (struct sk_buff *)(p); \
65 uint tagsz = sizeof(s->cb); \
66 ASSERT(OSL_PKTTAG_SZ <= tagsz); \
67 memset(s->cb, 0, tagsz); \
69 #endif /* BCM_OBJECT_TRACE */
/*
 * osl_static_mem_init - attach platform-preallocated memory to this osh.
 * Fetches the prealloc region (section 3) into bcm_static_buf and sets its
 * buf_ptr past the header area; for SDIO / static-ctrlbuf builds it also
 * copies the preallocated skb pointer table (section 4) into bcm_static_skb
 * and marks every slot unused.
 * NOTE(review): several source lines are elided in this extraction (the
 * embedded original numbering skips, e.g. 80-81, 102-103); code is kept
 * verbatim and only comments are added.
 */
71 int osl_static_mem_init(osl_t
*osh
, void *adapter
)
73 #ifdef CONFIG_DHD_USE_STATIC_BUF
/* First caller with an adapter wires up the shared static buffer pool. */
74 if (!bcm_static_buf
&& adapter
) {
75 if (!(bcm_static_buf
= (bcm_static_buf_t
*)wifi_platform_prealloc(adapter
,
76 3, STATIC_BUF_SIZE
+ STATIC_BUF_TOTAL_LEN
))) {
77 printk("can not alloc static buf!\n");
78 bcm_static_skb
= NULL
;
79 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
82 printk("succeed to alloc static buf\n");
85 spin_lock_init(&bcm_static_buf
->static_lock
);
/* Payload area starts STATIC_BUF_SIZE past the pool header. */
87 bcm_static_buf
->buf_ptr
= (unsigned char *)bcm_static_buf
+ STATIC_BUF_SIZE
;
90 #if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
/* Set up the static skb table (section 4) the first time through. */
91 if (!bcm_static_skb
&& adapter
) {
93 void *skb_buff_ptr
= 0;
94 bcm_static_skb
= (bcm_static_pkt_t
*)((char *)bcm_static_buf
+ 2048);
95 skb_buff_ptr
= wifi_platform_prealloc(adapter
, 4, 0);
97 printk("cannot alloc static buf!\n");
98 bcm_static_buf
= NULL
;
99 bcm_static_skb
= NULL
;
100 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
/* Copy the preallocated skb pointer table into the pool descriptor. */
104 bcopy(skb_buff_ptr
, bcm_static_skb
, sizeof(struct sk_buff
*) *
105 (STATIC_PKT_MAX_NUM
));
/* All slots start out free. */
106 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
107 bcm_static_skb
->pkt_use
[i
] = 0;
110 #ifdef DHD_USE_STATIC_CTRLBUF
/* Ctrl-buf builds guard the pool with a spinlock and a round-robin index. */
111 spin_lock_init(&bcm_static_skb
->osl_pkt_lock
);
112 bcm_static_skb
->last_allocated_index
= 0;
/* Non-ctrlbuf builds serialize pool access with a binary semaphore. */
114 sema_init(&bcm_static_skb
->osl_pkt_sem
, 1);
115 #endif /* DHD_USE_STATIC_CTRLBUF */
117 #endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
118 #endif /* CONFIG_DHD_USE_STATIC_BUF */
/*
 * osl_static_mem_deinit - drop this osh's use of the static pools.
 * NOTE(review): the bodies of both if-branches are elided in this
 * extraction; presumably they release/clear bcm_static_buf and
 * bcm_static_skb - TODO confirm against the full source.
 */
123 int osl_static_mem_deinit(osl_t
*osh
, void *adapter
)
125 #ifdef CONFIG_DHD_USE_STATIC_BUF
126 if (bcm_static_buf
) {
130 if (bcm_static_skb
) {
134 #endif /* CONFIG_DHD_USE_STATIC_BUF */
139 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
140 * DDR aliasing into non-ACP address space. Such Fwder buffers must be
141 * explicitly managed from a coherency perspective.
/* NOTE(review): the function body is entirely elided in this extraction. */
143 static inline void BCMFASTPATH
144 osl_fwderbuf_reset(osl_t
*osh
, struct sk_buff
*skb
)
/*
 * osl_alloc_skb - central skb allocator for this OSL.
 * On >= 2.6.25 kernels it picks GFP_ATOMIC when called from atomic or
 * IRQs-disabled context (GFP_KERNEL otherwise) and allocates with
 * __dev_alloc_skb(); older kernels fall back to dev_alloc_skb().
 * NOTE(review): some lines are elided in this extraction (e.g. the
 * DHD_USE_ATOMIC_PKTGET branch body); code kept verbatim.
 */
148 static struct sk_buff
* BCMFASTPATH
149 osl_alloc_skb(osl_t
*osh
, unsigned int len
)
152 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
/* Choose allocation flags based on the calling context. */
153 gfp_t flags
= (in_atomic() || irqs_disabled()) ? GFP_ATOMIC
: GFP_KERNEL
;
154 #ifdef DHD_USE_ATOMIC_PKTGET
156 #endif /* DHD_USE_ATOMIC_PKTGET */
157 skb
= __dev_alloc_skb(len
, flags
);
159 skb
= dev_alloc_skb(len
);
160 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
165 /* Convert a driver packet to native(OS) packet
166 * In the process, packettag is zeroed out before sending up
167 * IP code depends on skb->cb to be setup correctly with various options
168 * In our case, that means it should be 0
170 struct sk_buff
* BCMFASTPATH
171 osl_pkt_tonative(osl_t
*osh
, void *pkt
)
173 struct sk_buff
*nskb
;
/* Wipe the OSL tag area (skb->cb) before handing the skb to the stack. */
176 OSL_PKTTAG_CLEAR(pkt
);
178 /* Decrement the packet counter */
/* Walk the skb->next list; chained packets subtract their full chain count. */
179 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
180 atomic_sub(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->cmn
->pktalloced
);
183 return (struct sk_buff
*)pkt
;
186 /* Convert a native(OS) packet to driver packet.
187 * In the process, native packet is destroyed, there is no copying
188 * Also, a packettag is zeroed out
/* NOTE(review): return-type line and parts of the loop bodies are elided
 * in this extraction; code kept verbatim, comments only. */
191 osl_pkt_frmnative(osl_t
*osh
, void *pkt
)
193 struct sk_buff
*cskb
;
194 struct sk_buff
*nskb
;
/* Running count of skbs seen; folded into osh->cmn->pktalloced at the end. */
195 unsigned long pktalloced
= 0;
/* Wipe the OSL tag area (skb->cb) now that the skb belongs to the driver. */
198 OSL_PKTTAG_CLEAR(pkt
);
200 /* walk the PKTCLINK() list */
201 for (cskb
= (struct sk_buff
*)pkt
;
203 cskb
= PKTISCHAINED(cskb
) ? PKTCLINK(cskb
) : NULL
) {
205 /* walk the pkt buffer list */
206 for (nskb
= cskb
; nskb
; nskb
= nskb
->next
) {
208 /* Increment the packet counter */
211 /* clean the 'prev' pointer
212 * Kernel 3.18 is leaving skb->prev pointer set to skb
213 * to indicate a non-fragmented skb
215 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
217 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
222 /* Increment the packet counter */
/* Single atomic update instead of one per skb. */
223 atomic_add(pktalloced
, &osh
->cmn
->pktalloced
);
228 /* Return a new packet. zero out pkttag */
/* Signature gains (line, caller) under BCM_OBJECT_TRACE for leak tracking.
 * NOTE(review): several body lines are elided in this extraction (e.g. the
 * early-return under the lmtest branch); code kept verbatim. */
229 #ifdef BCM_OBJECT_TRACE
231 linux_pktget(osl_t
*osh
, uint len
, int line
, const char *caller
)
234 linux_pktget(osl_t
*osh
, uint len
)
235 #endif /* BCM_OBJECT_TRACE */
/* lmtest: low-memory simulation - randomly fail lmtest% of allocations. */
239 if (lmtest
!= FALSE
) {
240 get_random_bytes(&num
, sizeof(uchar
));
241 if ((num
+ 1) <= (256 * lmtest
/ 100))
245 if ((skb
= osl_alloc_skb(osh
, len
))) {
/* Account for the new packet and (optionally) register it with the tracer. */
250 atomic_inc(&osh
->cmn
->pktalloced
);
251 #ifdef BCM_OBJECT_TRACE
252 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, caller
, line
);
253 #endif /* BCM_OBJECT_TRACE */
256 return ((void*) skb
);
259 /* Free the driver packet. Free the tag if present */
/* NOTE(review): many body lines are elided in this extraction (branch
 * closes, the skb->next walk, the no-destructor free path); code kept
 * verbatim, comments only. */
260 #ifdef BCM_OBJECT_TRACE
262 linux_pktfree(osl_t
*osh
, void *p
, bool send
, int line
, const char *caller
)
265 linux_pktfree(osl_t
*osh
, void *p
, bool send
)
266 #endif /* BCM_OBJECT_TRACE */
268 struct sk_buff
*skb
, *nskb
;
272 skb
= (struct sk_buff
*) p
;
/* Give the registered tx/rx completion hook first crack at the packet. */
275 if (osh
->pub
.tx_fn
) {
276 osh
->pub
.tx_fn(osh
->pub
.tx_ctx
, p
, 0);
279 if (osh
->pub
.rx_fn
) {
280 osh
->pub
.rx_fn(osh
->pub
.rx_ctx
, p
);
284 PKTDBG_TRACE(osh
, (void *) skb
, PKTLIST_PKTFREE
);
/* Guard against freeing a static-pool packet through the normal path:
 * pool skbs carry PREALLOC_USED/FREE_MAGIC in skb->mac_len. */
286 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
287 if (skb
&& (skb
->mac_len
== PREALLOC_USED_MAGIC
)) {
288 printk("%s: pkt %p is from static pool\n",
294 if (skb
&& (skb
->mac_len
== PREALLOC_FREE_MAGIC
)) {
295 printk("%s: pkt %p is from static pool and not in used\n",
300 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
302 /* perversion: we use skb->next to chain multi-skb packets */
307 #ifdef BCM_OBJECT_TRACE
308 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, caller
, line
);
309 #endif /* BCM_OBJECT_TRACE */
/* An skb with a destructor must go through dev_kfree_skb_any(), which is
 * safe from any context (including hard IRQ). */
312 if (skb
->destructor
) {
313 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
316 dev_kfree_skb_any(skb
);
318 /* can free immediately (even in_irq()) if destructor
/* Balance the atomic_inc done at allocation time. */
324 atomic_dec(&osh
->cmn
->pktalloced
);
329 #ifdef CONFIG_DHD_USE_STATIC_BUF
/*
 * osl_pktget_static - allocate a packet from the preallocated skb pools.
 * Pools by size: skb_4k (<= 1 page), skb_8k (<= 2 pages) and, with
 * ENHANCED_STATIC_BUF, a single skb_16k slot. Requests larger than
 * DHD_SKB_MAX_BUFSIZE, or any request when the pool is absent/exhausted,
 * fall back to dynamic allocation via linux_pktget().
 * DHD_USE_STATIC_CTRLBUF builds use only the 8k pool, guarded by a
 * spinlock with a round-robin start index; other builds serialize with
 * the pool semaphore.
 * NOTE(review): many lines are elided in this extraction (loop-break
 * bodies, returns, #else lines); code kept verbatim, comments only.
 */
331 osl_pktget_static(osl_t
*osh
, uint len
)
335 #ifdef DHD_USE_STATIC_CTRLBUF
337 #endif /* DHD_USE_STATIC_CTRLBUF */
/* No static pool available: use the dynamic allocator. */
340 return linux_pktget(osh
, len
);
/* Oversized request: log it and fall back to dynamic allocation. */
342 if (len
> DHD_SKB_MAX_BUFSIZE
) {
343 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__
, len
);
344 return linux_pktget(osh
, len
);
347 #ifdef DHD_USE_STATIC_CTRLBUF
348 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
350 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
/* Round-robin scan of the 8k pool starting at last_allocated_index. */
352 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
353 index
= bcm_static_skb
->last_allocated_index
% STATIC_PKT_2PAGE_NUM
;
354 bcm_static_skb
->last_allocated_index
++;
355 if (bcm_static_skb
->skb_8k
[index
] &&
356 bcm_static_skb
->pkt_use
[index
] == 0) {
/* i < STATIC_PKT_2PAGE_NUM means the scan found a free slot. */
361 if (i
< STATIC_PKT_2PAGE_NUM
) {
362 bcm_static_skb
->pkt_use
[index
] = 1;
363 skb
= bcm_static_skb
->skb_8k
[index
];
/* Reset data/tail to a pristine state with default headroom. */
364 skb
->data
= skb
->head
;
365 #ifdef NET_SKBUFF_DATA_USES_OFFSET
366 skb_set_tail_pointer(skb
, PKT_HEADROOM_DEFAULT
);
368 skb
->tail
= skb
->data
+ PKT_HEADROOM_DEFAULT
;
369 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
370 skb
->data
+= PKT_HEADROOM_DEFAULT
;
373 #ifdef NET_SKBUFF_DATA_USES_OFFSET
374 skb_set_tail_pointer(skb
, len
);
376 skb
->tail
= skb
->data
+ len
;
377 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
/* Mark the skb as "from static pool, in use" for pktfree-side checks. */
379 skb
->mac_len
= PREALLOC_USED_MAGIC
;
380 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
/* Pool exhausted: release the lock and report. */
385 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
386 printk("%s: all static pkt in use!\n", __FUNCTION__
);
/* Non-ctrlbuf path: serialize pool access with the semaphore. */
389 down(&bcm_static_skb
->osl_pkt_sem
);
391 if (len
<= DHD_SKB_1PAGE_BUFSIZE
) {
/* Linear scan of the 4k pool for a free, populated slot. */
392 for (i
= 0; i
< STATIC_PKT_1PAGE_NUM
; i
++) {
393 if (bcm_static_skb
->skb_4k
[i
] &&
394 bcm_static_skb
->pkt_use
[i
] == 0) {
399 if (i
!= STATIC_PKT_1PAGE_NUM
) {
400 bcm_static_skb
->pkt_use
[i
] = 1;
402 skb
= bcm_static_skb
->skb_4k
[i
];
403 #ifdef NET_SKBUFF_DATA_USES_OFFSET
404 skb_set_tail_pointer(skb
, len
);
406 skb
->tail
= skb
->data
+ len
;
407 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
410 up(&bcm_static_skb
->osl_pkt_sem
);
/* Try the 8k pool; pkt_use[] indexes continue after the 4k slots. */
415 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
416 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
417 if (bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
] &&
418 bcm_static_skb
->pkt_use
[i
] == 0) {
423 if ((i
>= STATIC_PKT_1PAGE_NUM
) && (i
< STATIC_PKT_1_2PAGE_NUM
)) {
424 bcm_static_skb
->pkt_use
[i
] = 1;
425 skb
= bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
];
426 #ifdef NET_SKBUFF_DATA_USES_OFFSET
427 skb_set_tail_pointer(skb
, len
);
429 skb
->tail
= skb
->data
+ len
;
430 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
433 up(&bcm_static_skb
->osl_pkt_sem
);
/* Last resort: the single 16k slot occupies the final pkt_use[] entry. */
438 #if defined(ENHANCED_STATIC_BUF)
439 if (bcm_static_skb
->skb_16k
&&
440 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] == 0) {
441 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 1;
443 skb
= bcm_static_skb
->skb_16k
;
444 #ifdef NET_SKBUFF_DATA_USES_OFFSET
445 skb_set_tail_pointer(skb
, len
);
447 skb
->tail
= skb
->data
+ len
;
448 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
451 up(&bcm_static_skb
->osl_pkt_sem
);
454 #endif /* ENHANCED_STATIC_BUF */
/* Nothing free anywhere: release the semaphore and fall back to dynamic. */
456 up(&bcm_static_skb
->osl_pkt_sem
);
457 printk("%s: all static pkt in use!\n", __FUNCTION__
);
458 return linux_pktget(osh
, len
);
459 #endif /* DHD_USE_STATIC_CTRLBUF */
/*
 * osl_pktfree_static - return a packet to the preallocated pool.
 * Searches the 8k (ctrlbuf builds) or 4k/8k/16k pools for the pointer and
 * clears its pkt_use[] slot; packets not found in any pool (and all
 * packets when no pool exists) are freed dynamically via linux_pktfree().
 * Ctrlbuf builds also detect double frees via pkt_use[] and the
 * PREALLOC_USED/FREE_MAGIC marker kept in skb->mac_len.
 * NOTE(review): many lines are elided in this extraction (early returns,
 * printk argument lines, brace closes); code kept verbatim.
 */
463 osl_pktfree_static(osl_t
*osh
, void *p
, bool send
)
466 #ifdef DHD_USE_STATIC_CTRLBUF
467 struct sk_buff
*skb
= (struct sk_buff
*)p
;
469 #endif /* DHD_USE_STATIC_CTRLBUF */
/* No static pool: this packet must be a dynamic one. */
475 if (!bcm_static_skb
) {
476 linux_pktfree(osh
, p
, send
);
480 #ifdef DHD_USE_STATIC_CTRLBUF
481 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
/* Find the slot owning this pointer in the 8k pool. */
483 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
484 if (p
== bcm_static_skb
->skb_8k
[i
]) {
/* Slot already free -> double free; report instead of corrupting state. */
485 if (bcm_static_skb
->pkt_use
[i
] == 0) {
486 printk("%s: static pkt idx %d(%p) is double free\n",
489 bcm_static_skb
->pkt_use
[i
] = 0;
/* Cross-check the in-use marker stored in skb->mac_len. */
492 if (skb
->mac_len
!= PREALLOC_USED_MAGIC
) {
493 printk("%s: static pkt idx %d(%p) is not in used\n",
497 skb
->mac_len
= PREALLOC_FREE_MAGIC
;
498 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
/* Pointer not found in the pool: report it. */
503 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
504 printk("%s: packet %p does not exist in the pool\n", __FUNCTION__
, p
);
/* Non-ctrlbuf path: check each pool in turn under the semaphore. */
506 down(&bcm_static_skb
->osl_pkt_sem
);
507 for (i
= 0; i
< STATIC_PKT_1PAGE_NUM
; i
++) {
508 if (p
== bcm_static_skb
->skb_4k
[i
]) {
509 bcm_static_skb
->pkt_use
[i
] = 0;
510 up(&bcm_static_skb
->osl_pkt_sem
);
515 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
516 if (p
== bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
]) {
517 bcm_static_skb
->pkt_use
[i
] = 0;
518 up(&bcm_static_skb
->osl_pkt_sem
);
522 #ifdef ENHANCED_STATIC_BUF
523 if (p
== bcm_static_skb
->skb_16k
) {
524 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 0;
525 up(&bcm_static_skb
->osl_pkt_sem
);
529 up(&bcm_static_skb
->osl_pkt_sem
);
530 #endif /* DHD_USE_STATIC_CTRLBUF */
/* Not a pool packet after all: free it dynamically. */
531 linux_pktfree(osh
, p
, send
);
533 #endif /* CONFIG_DHD_USE_STATIC_BUF */
536 * The pkttag contents are NOT cloned.
/* osl_pktdup: clone (not copy) an unchained skb with skb_clone(GFP_ATOMIC),
 * bump the allocation counter and register the clone with the object
 * tracer when enabled. NOTE(review): several lines are elided in this
 * extraction (return statements, the cb-clearing after skb_clone). */
538 #ifdef BCM_OBJECT_TRACE
540 osl_pktdup(osl_t
*osh
, void *skb
, int line
, const char *caller
)
543 osl_pktdup(osl_t
*osh
, void *skb
)
544 #endif /* BCM_OBJECT_TRACE */
/* Chained packets cannot be duplicated through this path. */
548 ASSERT(!PKTISCHAINED(skb
));
550 /* clear the CTFBUF flag if set and map the rest of the buffer
555 if ((p
= skb_clone((struct sk_buff
*)skb
, GFP_ATOMIC
)) == NULL
)
558 /* skb_clone copies skb->cb.. we don't want that */
562 /* Increment the packet counter */
563 atomic_inc(&osh
->cmn
->pktalloced
);
564 #ifdef BCM_OBJECT_TRACE
565 bcm_object_trace_opr(p
, BCM_OBJDBG_ADD_PKT
, caller
, line
);
566 #endif /* BCM_OBJECT_TRACE */
572 * BINOSL selects the slightly slower function-call-based binary compatible osl.
/* osl_pktalloced: report outstanding packet allocations. Returns the
 * shared counter only when this osh is the sole reference holder
 * (refcount == 1); the other-branch return is elided in this extraction -
 * TODO confirm against the full source. */
576 osl_pktalloced(osl_t
*osh
)
578 if (atomic_read(&osh
->cmn
->refcount
) == 1)
579 return (atomic_read(&osh
->cmn
->pktalloced
));
584 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
585 #include <linux/kallsyms.h>
586 #include <net/sock.h>
/*
 * osl_pkt_orphan_partial - loosen TCP Small Queues accounting for WiFi.
 * Only acts on skbs whose destructor is tcp_wfree; since tcp_wfree is not
 * exported, its address is resolved once by symbolizing the skb's
 * destructor pointer via sprint_symbol() and comparing against the name.
 * The skb then gives back (TSQ_MULTIPLIER-1)/TSQ_MULTIPLIER of its
 * truesize to the socket's sk_wmem_alloc budget.
 * NOTE(review): early-return bodies are elided in this extraction.
 */
588 osl_pkt_orphan_partial(struct sk_buff
*skb
)
/* Cached tcp_wfree address, resolved lazily on first use. */
591 static void *p_tcp_wfree
= NULL
;
/* Nothing to do for destructor-less or plain sock_wfree skbs. */
593 if (!skb
->destructor
|| skb
->destructor
== sock_wfree
)
596 if (unlikely(!p_tcp_wfree
)) {
597 char sym
[KSYM_SYMBOL_LEN
];
598 sprint_symbol(sym
, (unsigned long)skb
->destructor
);
600 if (!strcmp(sym
, "tcp_wfree"))
601 p_tcp_wfree
= skb
->destructor
;
/* Only TCP skbs with a valid socket get the adjustment. */
606 if (unlikely(skb
->destructor
!= p_tcp_wfree
|| !skb
->sk
))
609 /* abstract a certain portion of skb truesize from the socket
610 * sk_wmem_alloc to allow more skb can be allocated for this
611 * socket for better cushion meeting WiFi device requirement
613 fraction
= skb
->truesize
* (TSQ_MULTIPLIER
- 1) / TSQ_MULTIPLIER
;
614 skb
->truesize
-= fraction
;
615 atomic_sub(fraction
, &skb
->sk
->sk_wmem_alloc
);
617 #endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */