2 * Linux OS Independent Layer
4 * Copyright (C) 1999-2016, Broadcom Corporation
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Open:>>
27 * $Id: linux_osl.c 589291 2015-09-29 07:09:00Z $
33 #include <bcmendian.h>
38 #include <asm/paccess.h>
39 #include <asm/cache.h>
40 #include <asm/r4kcache.h>
44 #if !defined(STBLINUX)
45 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
46 #include <asm/cacheflush.h>
47 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
50 #include <linux/random.h>
54 #include <linux/delay.h>
56 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
57 #include <asm-generic/pci-dma-compat.h>
62 #include <linux/module.h>
63 #include <linux/kernel.h>
65 #include <linux/printk.h>
66 #include <linux/errno.h>
68 #include <linux/moduleparam.h>
70 #include <linux/skbuff.h>
71 #include <linux/vmalloc.h>
73 #include <linux/highmem.h>
74 #include <linux/dma-mapping.h>
75 #include <asm/memory.h>
76 #if defined(__ARM_ARCH_7A__)
77 #include <arch/arm/include/asm/tlbflush.h>
79 #endif /* BCM_SECURE_DMA */
84 #include <linux/spinlock.h>
85 extern spinlock_t l2x0_reg_lock
;
89 #define PCI_CFG_RETRY 10
91 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
92 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
93 #define DUMPBUFSZ 1024
95 /* dependency check */
96 #if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
97 #error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only"
98 #endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
100 #ifdef CONFIG_DHD_USE_STATIC_BUF
101 #ifdef DHD_USE_STATIC_CTRLBUF
102 #define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
103 #define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
104 #define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
106 #define PREALLOC_FREE_MAGIC 0xFEDC
107 #define PREALLOC_USED_MAGIC 0xFCDE
109 #define DHD_SKB_HDRSIZE 336
110 #define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
111 #define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
112 #define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
113 #endif /* DHD_USE_STATIC_CTRLBUF */
115 #define STATIC_BUF_MAX_NUM 16
116 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
117 #define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
119 typedef struct bcm_static_buf
{
120 spinlock_t static_lock
;
121 unsigned char *buf_ptr
;
122 unsigned char buf_use
[STATIC_BUF_MAX_NUM
];
125 static bcm_static_buf_t
*bcm_static_buf
= 0;
127 #ifdef DHD_USE_STATIC_CTRLBUF
128 #define STATIC_PKT_4PAGE_NUM 0
129 #define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
130 #elif defined(ENHANCED_STATIC_BUF)
131 #define STATIC_PKT_4PAGE_NUM 1
132 #define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE
134 #define STATIC_PKT_4PAGE_NUM 0
135 #define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
136 #endif /* DHD_USE_STATIC_CTRLBUF */
138 #ifdef DHD_USE_STATIC_CTRLBUF
139 #define STATIC_PKT_1PAGE_NUM 0
140 #define STATIC_PKT_2PAGE_NUM 64
142 #define STATIC_PKT_1PAGE_NUM 8
143 #define STATIC_PKT_2PAGE_NUM 8
144 #endif /* DHD_USE_STATIC_CTRLBUF */
146 #define STATIC_PKT_1_2PAGE_NUM \
147 ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
148 #define STATIC_PKT_MAX_NUM \
149 ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
151 typedef struct bcm_static_pkt
{
152 #ifdef DHD_USE_STATIC_CTRLBUF
153 struct sk_buff
*skb_8k
[STATIC_PKT_2PAGE_NUM
];
154 unsigned char pkt_invalid
[STATIC_PKT_2PAGE_NUM
];
155 spinlock_t osl_pkt_lock
;
156 uint32 last_allocated_index
;
158 struct sk_buff
*skb_4k
[STATIC_PKT_1PAGE_NUM
];
159 struct sk_buff
*skb_8k
[STATIC_PKT_2PAGE_NUM
];
160 #ifdef ENHANCED_STATIC_BUF
161 struct sk_buff
*skb_16k
;
162 #endif /* ENHANCED_STATIC_BUF */
163 struct semaphore osl_pkt_sem
;
164 #endif /* DHD_USE_STATIC_CTRLBUF */
165 unsigned char pkt_use
[STATIC_PKT_MAX_NUM
];
168 static bcm_static_pkt_t
*bcm_static_skb
= 0;
170 void* wifi_platform_prealloc(void *adapter
, int section
, unsigned long size
);
171 #endif /* CONFIG_DHD_USE_STATIC_BUF */
173 typedef struct bcm_mem_link
{
174 struct bcm_mem_link
*prev
;
175 struct bcm_mem_link
*next
;
179 char file
[BCM_MEM_FILENAME_LEN
];
182 struct osl_cmn_info
{
184 atomic_t pktalloced
; /* Number of allocated packet buffers */
185 spinlock_t dbgmem_lock
;
186 bcm_mem_link_t
*dbgmem_list
;
187 spinlock_t pktalloc_lock
;
188 atomic_t refcount
; /* Number of references to this shared structure. */
190 typedef struct osl_cmn_info osl_cmn_t
;
194 uint32 flags
; /* If specific cases to be handled in the OSL */
199 osl_cmn_t
*cmn
; /* Common OSL related data shred between two OSH's */
202 #ifdef BCM_SECURE_DMA
203 struct sec_mem_elem
*sec_list_4096
;
204 struct sec_mem_elem
*sec_list_base_4096
;
205 phys_addr_t contig_base
;
206 void *contig_base_va
;
207 phys_addr_t contig_base_alloc
;
208 void *contig_base_alloc_va
;
209 phys_addr_t contig_base_alloc_coherent
;
210 void *contig_base_alloc_coherent_va
;
211 void *contig_base_coherent_va
;
212 phys_addr_t contig_delta_va_pa
;
217 } sec_cma_coherent
[SEC_CMA_COHERENT_MAX
];
219 #endif /* BCM_SECURE_DMA */
222 #ifdef BCM_SECURE_DMA
223 static void * osl_sec_dma_ioremap(osl_t
*osh
, struct page
*page
, size_t size
,
224 bool iscache
, bool isdecr
);
225 static void osl_sec_dma_iounmap(osl_t
*osh
, void *contig_base_va
, size_t size
);
226 static int osl_sec_dma_init_elem_mem_block(osl_t
*osh
, size_t mbsize
, int max
,
227 sec_mem_elem_t
**list
);
228 static void osl_sec_dma_deinit_elem_mem_block(osl_t
*osh
, size_t mbsize
, int max
,
229 void *sec_list_base
);
230 static sec_mem_elem_t
* osl_sec_dma_alloc_mem_elem(osl_t
*osh
, void *va
, uint size
,
231 int direction
, struct sec_cma_info
*ptr_cma_info
, uint offset
);
232 static void osl_sec_dma_free_mem_elem(osl_t
*osh
, sec_mem_elem_t
*sec_mem_elem
);
233 static void osl_sec_dma_init_consistent(osl_t
*osh
);
234 static void *osl_sec_dma_alloc_consistent(osl_t
*osh
, uint size
, uint16 align_bits
,
236 static void osl_sec_dma_free_consistent(osl_t
*osh
, void *va
, uint size
, dmaaddr_t pa
);
237 #endif /* BCM_SECURE_DMA */
239 #define OSL_PKTTAG_CLEAR(p) \
241 struct sk_buff *s = (struct sk_buff *)(p); \
242 ASSERT(OSL_PKTTAG_SZ == 32); \
243 *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
244 *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
245 *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
246 *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
249 /* PCMCIA attribute space access macros */
251 /* Global ASSERT type flag */
252 uint32 g_assert_type
= 0;
253 module_param(g_assert_type
, int, 0);
255 #ifdef BCM_SECURE_DMA
256 #define SECDMA_MODULE_PARAMS 0
257 #define SECDMA_EXT_FILE 1
259 unsigned long secdma_addr
= 0;
260 unsigned long secdma_addr2
= 0;
262 u32 secdma_size2
= 0;
263 module_param(secdma_addr
, ulong
, 0);
264 module_param(secdma_size
, int, 0);
265 module_param(secdma_addr2
, ulong
, 0);
266 module_param(secdma_size2
, int, 0);
267 static int secdma_found
= 0;
268 #endif /* BCM_SECURE_DMA */
270 static int16 linuxbcmerrormap
[] =
272 -EINVAL
, /* BCME_ERROR */
273 -EINVAL
, /* BCME_BADARG */
274 -EINVAL
, /* BCME_BADOPTION */
275 -EINVAL
, /* BCME_NOTUP */
276 -EINVAL
, /* BCME_NOTDOWN */
277 -EINVAL
, /* BCME_NOTAP */
278 -EINVAL
, /* BCME_NOTSTA */
279 -EINVAL
, /* BCME_BADKEYIDX */
280 -EINVAL
, /* BCME_RADIOOFF */
281 -EINVAL
, /* BCME_NOTBANDLOCKED */
282 -EINVAL
, /* BCME_NOCLK */
283 -EINVAL
, /* BCME_BADRATESET */
284 -EINVAL
, /* BCME_BADBAND */
285 -E2BIG
, /* BCME_BUFTOOSHORT */
286 -E2BIG
, /* BCME_BUFTOOLONG */
287 -EBUSY
, /* BCME_BUSY */
288 -EINVAL
, /* BCME_NOTASSOCIATED */
289 -EINVAL
, /* BCME_BADSSIDLEN */
290 -EINVAL
, /* BCME_OUTOFRANGECHAN */
291 -EINVAL
, /* BCME_BADCHAN */
292 -EFAULT
, /* BCME_BADADDR */
293 -ENOMEM
, /* BCME_NORESOURCE */
294 -EOPNOTSUPP
, /* BCME_UNSUPPORTED */
295 -EMSGSIZE
, /* BCME_BADLENGTH */
296 -EINVAL
, /* BCME_NOTREADY */
297 -EPERM
, /* BCME_EPERM */
298 -ENOMEM
, /* BCME_NOMEM */
299 -EINVAL
, /* BCME_ASSOCIATED */
300 -ERANGE
, /* BCME_RANGE */
301 -EINVAL
, /* BCME_NOTFOUND */
302 -EINVAL
, /* BCME_WME_NOT_ENABLED */
303 -EINVAL
, /* BCME_TSPEC_NOTFOUND */
304 -EINVAL
, /* BCME_ACM_NOTSUPPORTED */
305 -EINVAL
, /* BCME_NOT_WME_ASSOCIATION */
306 -EIO
, /* BCME_SDIO_ERROR */
307 -ENODEV
, /* BCME_DONGLE_DOWN */
308 -EINVAL
, /* BCME_VERSION */
309 -EIO
, /* BCME_TXFAIL */
310 -EIO
, /* BCME_RXFAIL */
311 -ENODEV
, /* BCME_NODEVICE */
312 -EINVAL
, /* BCME_NMODE_DISABLED */
313 -ENODATA
, /* BCME_NONRESIDENT */
314 -EINVAL
, /* BCME_SCANREJECT */
315 -EINVAL
, /* BCME_USAGE_ERROR */
316 -EIO
, /* BCME_IOCTL_ERROR */
317 -EIO
, /* BCME_SERIAL_PORT_ERR */
318 -EOPNOTSUPP
, /* BCME_DISABLED, BCME_NOTENABLED */
319 -EIO
, /* BCME_DECERR */
320 -EIO
, /* BCME_ENCERR */
321 -EIO
, /* BCME_MICERR */
322 -ERANGE
, /* BCME_REPLAY */
323 -EINVAL
, /* BCME_IE_NOTFOUND */
324 -EINVAL
, /* BCME_DATA_NOTFOUND */
326 /* When a new error code is added to bcmutils.h, add os
327 * specific error translation here as well
329 /* check if BCME_LAST changed since the last time this function was updated */
331 #error "You need to add a OS error translation in the linuxbcmerrormap \
332 for new error code defined in bcmutils.h"
337 /* translate bcmerrors into linux errors */
339 osl_error(int bcmerror
)
343 else if (bcmerror
< BCME_LAST
)
344 bcmerror
= BCME_ERROR
;
346 /* Array bounds covered by ASSERT in osl_attach */
347 return linuxbcmerrormap
[-bcmerror
];
351 #ifdef SHARED_OSL_CMN
352 osl_attach(void *pdev
, uint bustype
, bool pkttag
, void **osl_cmn
)
354 osl_attach(void *pdev
, uint bustype
, bool pkttag
)
355 #endif /* SHARED_OSL_CMN */
357 #ifndef SHARED_OSL_CMN
358 void **osl_cmn
= NULL
;
359 #endif /* SHARED_OSL_CMN */
363 #ifdef BCM_SECURE_DMA
367 flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
368 if (!(osh
= kmalloc(sizeof(osl_t
), flags
)))
373 bzero(osh
, sizeof(osl_t
));
375 if (osl_cmn
== NULL
|| *osl_cmn
== NULL
) {
376 if (!(osh
->cmn
= kmalloc(sizeof(osl_cmn_t
), flags
))) {
380 bzero(osh
->cmn
, sizeof(osl_cmn_t
));
383 atomic_set(&osh
->cmn
->malloced
, 0);
384 osh
->cmn
->dbgmem_list
= NULL
;
385 spin_lock_init(&(osh
->cmn
->dbgmem_lock
));
387 spin_lock_init(&(osh
->cmn
->pktalloc_lock
));
392 atomic_add(1, &osh
->cmn
->refcount
);
394 bcm_object_trace_init();
396 /* Check that error map has the right number of entries in it */
397 ASSERT(ABS(BCME_LAST
) == (ARRAYSIZE(linuxbcmerrormap
) - 1));
401 osh
->pub
.pkttag
= pkttag
;
402 osh
->bustype
= bustype
;
403 osh
->magic
= OS_HANDLE_MAGIC
;
404 #ifdef BCM_SECURE_DMA
406 if ((secdma_addr
!= 0) && (secdma_size
!= 0)) {
407 printk("linux_osl.c: Buffer info passed via module params, using it.\n");
409 if (secdma_found
== 0) {
410 osh
->contig_base_alloc
= (phys_addr_t
)secdma_addr
;
411 secdma_memsize
= secdma_size
;
412 } else if (secdma_found
== 1) {
413 osh
->contig_base_alloc
= (phys_addr_t
)secdma_addr2
;
414 secdma_memsize
= secdma_size2
;
416 printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found
);
421 osh
->contig_base
= (phys_addr_t
)osh
->contig_base_alloc
;
423 printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize
);
424 printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
425 (unsigned int)osh
->contig_base_alloc
);
426 osh
->stb_ext_params
= SECDMA_MODULE_PARAMS
;
428 else if (stbpriv_init(osh
) == 0) {
429 printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
431 if (secdma_found
== 0) {
432 osh
->contig_base_alloc
=
433 (phys_addr_t
)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL
, 0);
434 secdma_memsize
= bcm_strtoul(stbparam_get("secdma_cma_size"), NULL
, 0);
435 } else if (secdma_found
== 1) {
436 osh
->contig_base_alloc
=
437 (phys_addr_t
)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL
, 0);
438 secdma_memsize
= bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL
, 0);
440 printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found
);
445 osh
->contig_base
= (phys_addr_t
)osh
->contig_base_alloc
;
447 printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize
);
448 printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
449 (unsigned int)osh
->contig_base_alloc
);
450 osh
->stb_ext_params
= SECDMA_EXT_FILE
;
453 printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
459 osh
->contig_base_alloc_coherent_va
= osl_sec_dma_ioremap(osh
,
460 phys_to_page((u32
)osh
->contig_base_alloc
),
461 CMA_DMA_DESC_MEMBLOCK
, FALSE
, TRUE
);
463 if (osh
->contig_base_alloc_coherent_va
== NULL
) {
469 osh
->contig_base_coherent_va
= osh
->contig_base_alloc_coherent_va
;
470 osh
->contig_base_alloc_coherent
= osh
->contig_base_alloc
;
471 osl_sec_dma_init_consistent(osh
);
473 osh
->contig_base_alloc
+= CMA_DMA_DESC_MEMBLOCK
;
475 osh
->contig_base_alloc_va
= osl_sec_dma_ioremap(osh
,
476 phys_to_page((u32
)osh
->contig_base_alloc
), CMA_DMA_DATA_MEMBLOCK
, TRUE
, FALSE
);
477 if (osh
->contig_base_alloc_va
== NULL
) {
478 osl_sec_dma_iounmap(osh
, osh
->contig_base_coherent_va
, CMA_DMA_DESC_MEMBLOCK
);
484 osh
->contig_base_va
= osh
->contig_base_alloc_va
;
486 if (BCME_OK
!= osl_sec_dma_init_elem_mem_block(osh
,
487 CMA_BUFSIZE_4K
, CMA_BUFNUM
, &osh
->sec_list_4096
)) {
488 osl_sec_dma_iounmap(osh
, osh
->contig_base_coherent_va
, CMA_DMA_DESC_MEMBLOCK
);
489 osl_sec_dma_iounmap(osh
, osh
->contig_base_va
, CMA_DMA_DATA_MEMBLOCK
);
495 osh
->sec_list_base_4096
= osh
->sec_list_4096
;
497 #endif /* BCM_SECURE_DMA */
503 osh
->pub
.mmbus
= TRUE
;
510 osh
->pub
.mmbus
= FALSE
;
522 int osl_static_mem_init(osl_t
*osh
, void *adapter
)
524 #ifdef CONFIG_DHD_USE_STATIC_BUF
525 if (!bcm_static_buf
&& adapter
) {
526 if (!(bcm_static_buf
= (bcm_static_buf_t
*)wifi_platform_prealloc(adapter
,
527 3, STATIC_BUF_SIZE
+ STATIC_BUF_TOTAL_LEN
))) {
528 printk("can not alloc static buf!\n");
529 bcm_static_skb
= NULL
;
530 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
533 printk("alloc static buf at %p!\n", bcm_static_buf
);
536 spin_lock_init(&bcm_static_buf
->static_lock
);
538 bcm_static_buf
->buf_ptr
= (unsigned char *)bcm_static_buf
+ STATIC_BUF_SIZE
;
541 #if defined(DHD_USE_STATIC_CTRLBUF)
542 if (!bcm_static_skb
&& adapter
) {
544 void *skb_buff_ptr
= 0;
545 bcm_static_skb
= (bcm_static_pkt_t
*)((char *)bcm_static_buf
+ 2048);
546 skb_buff_ptr
= wifi_platform_prealloc(adapter
, 4, 0);
548 printk("cannot alloc static buf!\n");
549 bcm_static_buf
= NULL
;
550 bcm_static_skb
= NULL
;
551 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
555 bcopy(skb_buff_ptr
, bcm_static_skb
, sizeof(struct sk_buff
*) *
556 (STATIC_PKT_MAX_NUM
));
557 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
558 bcm_static_skb
->pkt_use
[i
] = 0;
561 #ifdef DHD_USE_STATIC_CTRLBUF
562 spin_lock_init(&bcm_static_skb
->osl_pkt_lock
);
563 bcm_static_skb
->last_allocated_index
= 0;
565 sema_init(&bcm_static_skb
->osl_pkt_sem
, 1);
566 #endif /* DHD_USE_STATIC_CTRLBUF */
569 #endif /* CONFIG_DHD_USE_STATIC_BUF */
574 void osl_set_bus_handle(osl_t
*osh
, void *bus_handle
)
576 osh
->bus_handle
= bus_handle
;
579 void* osl_get_bus_handle(osl_t
*osh
)
581 return osh
->bus_handle
;
585 osl_detach(osl_t
*osh
)
590 #ifdef BCM_SECURE_DMA
591 if (osh
->stb_ext_params
== SECDMA_EXT_FILE
)
593 osl_sec_dma_deinit_elem_mem_block(osh
, CMA_BUFSIZE_4K
, CMA_BUFNUM
, osh
->sec_list_base_4096
);
594 osl_sec_dma_iounmap(osh
, osh
->contig_base_coherent_va
, CMA_DMA_DESC_MEMBLOCK
);
595 osl_sec_dma_iounmap(osh
, osh
->contig_base_va
, CMA_DMA_DATA_MEMBLOCK
);
598 #endif /* BCM_SECURE_DMA */
601 bcm_object_trace_deinit();
603 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
604 atomic_sub(1, &osh
->cmn
->refcount
);
605 if (atomic_read(&osh
->cmn
->refcount
) == 0) {
611 int osl_static_mem_deinit(osl_t
*osh
, void *adapter
)
613 #ifdef CONFIG_DHD_USE_STATIC_BUF
614 if (bcm_static_buf
) {
617 #endif /* CONFIG_DHD_USE_STATIC_BUF */
621 static struct sk_buff
*osl_alloc_skb(osl_t
*osh
, unsigned int len
)
624 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
625 gfp_t flags
= (in_atomic() || irqs_disabled()) ? GFP_ATOMIC
: GFP_KERNEL
;
626 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
629 skb
= __dev_alloc_skb(len
, flags
);
631 skb
= dev_alloc_skb(len
);
632 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
638 /* Convert a driver packet to native(OS) packet
639 * In the process, packettag is zeroed out before sending up
640 * IP code depends on skb->cb to be setup correctly with various options
641 * In our case, that means it should be 0
643 struct sk_buff
* BCMFASTPATH
644 osl_pkt_tonative(osl_t
*osh
, void *pkt
)
646 struct sk_buff
*nskb
;
649 OSL_PKTTAG_CLEAR(pkt
);
651 /* Decrement the packet counter */
652 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
653 atomic_sub(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->cmn
->pktalloced
);
656 return (struct sk_buff
*)pkt
;
659 /* Convert a native(OS) packet to driver packet.
660 * In the process, native packet is destroyed, there is no copying
661 * Also, a packettag is zeroed out
664 osl_pkt_frmnative(osl_t
*osh
, void *pkt
)
666 struct sk_buff
*nskb
;
669 OSL_PKTTAG_CLEAR(pkt
);
671 /* Increment the packet counter */
672 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
673 atomic_add(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->cmn
->pktalloced
);
679 /* Return a new packet. zero out pkttag */
681 osl_pktget(osl_t
*osh
, uint len
)
685 if (lmtest
!= FALSE
) {
686 get_random_bytes(&num
, sizeof(uchar
));
687 if ((num
+ 1) <= (256 * lmtest
/ 100))
691 if ((skb
= osl_alloc_skb(osh
, len
))) {
696 atomic_inc(&osh
->cmn
->pktalloced
);
699 return ((void*) skb
);
703 /* Free the driver packet. Free the tag if present */
705 osl_pktfree(osl_t
*osh
, void *p
, bool send
)
707 struct sk_buff
*skb
, *nskb
;
711 skb
= (struct sk_buff
*) p
;
713 if (send
&& osh
->pub
.tx_fn
)
714 osh
->pub
.tx_fn(osh
->pub
.tx_ctx
, p
, 0);
716 PKTDBG_TRACE(osh
, (void *) skb
, PKTLIST_PKTFREE
);
718 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
719 if (skb
&& (skb
->mac_len
== PREALLOC_USED_MAGIC
)) {
720 printk("%s: pkt %p is from static pool\n",
726 if (skb
&& (skb
->mac_len
== PREALLOC_FREE_MAGIC
)) {
727 printk("%s: pkt %p is from static pool and not in used\n",
732 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
734 /* perversion: we use skb->next to chain multi-skb packets */
743 dev_kfree_skb_any(skb
);
745 atomic_dec(&osh
->cmn
->pktalloced
);
750 #ifdef CONFIG_DHD_USE_STATIC_BUF
752 osl_pktget_static(osl_t
*osh
, uint len
)
756 #ifdef DHD_USE_STATIC_CTRLBUF
758 #endif /* DHD_USE_STATIC_CTRLBUF */
761 return osl_pktget(osh
, len
);
763 if (len
> DHD_SKB_MAX_BUFSIZE
) {
764 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__
, len
);
765 return osl_pktget(osh
, len
);
768 #ifdef DHD_USE_STATIC_CTRLBUF
769 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
771 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
773 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
774 index
= bcm_static_skb
->last_allocated_index
% STATIC_PKT_2PAGE_NUM
;
775 bcm_static_skb
->last_allocated_index
++;
776 if (bcm_static_skb
->skb_8k
[index
] &&
777 bcm_static_skb
->pkt_use
[index
] == 0) {
782 if ((i
!= STATIC_PKT_2PAGE_NUM
) &&
783 (index
>= 0) && (index
< STATIC_PKT_2PAGE_NUM
)) {
784 bcm_static_skb
->pkt_use
[index
] = 1;
785 skb
= bcm_static_skb
->skb_8k
[index
];
786 skb
->data
= skb
->head
;
787 #ifdef NET_SKBUFF_DATA_USES_OFFSET
788 skb_set_tail_pointer(skb
, NET_SKB_PAD
);
790 skb
->tail
= skb
->data
+ NET_SKB_PAD
;
791 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
792 skb
->data
+= NET_SKB_PAD
;
795 #ifdef NET_SKBUFF_DATA_USES_OFFSET
796 skb_set_tail_pointer(skb
, len
);
798 skb
->tail
= skb
->data
+ len
;
799 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
801 skb
->mac_len
= PREALLOC_USED_MAGIC
;
802 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
807 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
808 printk("%s: all static pkt in use!\n", __FUNCTION__
);
811 down(&bcm_static_skb
->osl_pkt_sem
);
813 if (len
<= DHD_SKB_1PAGE_BUFSIZE
) {
814 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
815 if (bcm_static_skb
->skb_4k
[i
] &&
816 bcm_static_skb
->pkt_use
[i
] == 0) {
821 if (i
!= STATIC_PKT_MAX_NUM
) {
822 bcm_static_skb
->pkt_use
[i
] = 1;
824 skb
= bcm_static_skb
->skb_4k
[i
];
825 #ifdef NET_SKBUFF_DATA_USES_OFFSET
826 skb_set_tail_pointer(skb
, len
);
828 skb
->tail
= skb
->data
+ len
;
829 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
832 up(&bcm_static_skb
->osl_pkt_sem
);
837 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
838 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
839 if (bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
] &&
840 bcm_static_skb
->pkt_use
[i
] == 0) {
845 if ((i
>= STATIC_PKT_1PAGE_NUM
) && (i
< STATIC_PKT_1_2PAGE_NUM
)) {
846 bcm_static_skb
->pkt_use
[i
] = 1;
847 skb
= bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
];
848 #ifdef NET_SKBUFF_DATA_USES_OFFSET
849 skb_set_tail_pointer(skb
, len
);
851 skb
->tail
= skb
->data
+ len
;
852 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
855 up(&bcm_static_skb
->osl_pkt_sem
);
860 #if defined(ENHANCED_STATIC_BUF)
861 if (bcm_static_skb
->skb_16k
&&
862 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] == 0) {
863 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 1;
865 skb
= bcm_static_skb
->skb_16k
;
866 #ifdef NET_SKBUFF_DATA_USES_OFFSET
867 skb_set_tail_pointer(skb
, len
);
869 skb
->tail
= skb
->data
+ len
;
870 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
873 up(&bcm_static_skb
->osl_pkt_sem
);
876 #endif /* ENHANCED_STATIC_BUF */
878 up(&bcm_static_skb
->osl_pkt_sem
);
879 printk("%s: all static pkt in use!\n", __FUNCTION__
);
880 return osl_pktget(osh
, len
);
881 #endif /* DHD_USE_STATIC_CTRLBUF */
885 osl_pktfree_static(osl_t
*osh
, void *p
, bool send
)
888 #ifdef DHD_USE_STATIC_CTRLBUF
889 struct sk_buff
*skb
= (struct sk_buff
*)p
;
891 #endif /* DHD_USE_STATIC_CTRLBUF */
897 if (!bcm_static_skb
) {
898 osl_pktfree(osh
, p
, send
);
902 #ifdef DHD_USE_STATIC_CTRLBUF
903 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
905 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
906 if (p
== bcm_static_skb
->skb_8k
[i
]) {
907 if (bcm_static_skb
->pkt_use
[i
] == 0) {
908 printk("%s: static pkt idx %d(%p) is double free\n",
911 bcm_static_skb
->pkt_use
[i
] = 0;
914 if (skb
->mac_len
!= PREALLOC_USED_MAGIC
) {
915 printk("%s: static pkt idx %d(%p) is not in used\n",
919 skb
->mac_len
= PREALLOC_FREE_MAGIC
;
920 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
925 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
926 printk("%s: packet %p does not exist in the pool\n", __FUNCTION__
, p
);
928 down(&bcm_static_skb
->osl_pkt_sem
);
929 for (i
= 0; i
< STATIC_PKT_1PAGE_NUM
; i
++) {
930 if (p
== bcm_static_skb
->skb_4k
[i
]) {
931 bcm_static_skb
->pkt_use
[i
] = 0;
932 up(&bcm_static_skb
->osl_pkt_sem
);
937 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
938 if (p
== bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
]) {
939 bcm_static_skb
->pkt_use
[i
] = 0;
940 up(&bcm_static_skb
->osl_pkt_sem
);
944 #ifdef ENHANCED_STATIC_BUF
945 if (p
== bcm_static_skb
->skb_16k
) {
946 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 0;
947 up(&bcm_static_skb
->osl_pkt_sem
);
951 up(&bcm_static_skb
->osl_pkt_sem
);
952 osl_pktfree(osh
, p
, send
);
953 #endif /* DHD_USE_STATIC_CTRLBUF */
955 #endif /* CONFIG_DHD_USE_STATIC_BUF */
958 osl_pci_read_config(osl_t
*osh
, uint offset
, uint size
)
961 uint retry
= PCI_CFG_RETRY
;
963 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
965 /* only 4byte access supported */
969 pci_read_config_dword(osh
->pdev
, offset
, &val
);
970 if (val
!= 0xffffffff)
979 osl_pci_write_config(osl_t
*osh
, uint offset
, uint size
, uint val
)
981 uint retry
= PCI_CFG_RETRY
;
983 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
985 /* only 4byte access supported */
989 pci_write_config_dword(osh
->pdev
, offset
, val
);
990 if (offset
!= PCI_BAR0_WIN
)
992 if (osl_pci_read_config(osh
, offset
, size
) == val
)
998 /* return bus # for the pci device pointed by osh->pdev */
1000 osl_pci_bus(osl_t
*osh
)
1002 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
1004 #if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
1005 return pci_domain_nr(((struct pci_dev
*)osh
->pdev
)->bus
);
1007 return ((struct pci_dev
*)osh
->pdev
)->bus
->number
;
1011 /* return slot # for the pci device pointed by osh->pdev */
1013 osl_pci_slot(osl_t
*osh
)
1015 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
1017 #if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
1018 return PCI_SLOT(((struct pci_dev
*)osh
->pdev
)->devfn
) + 1;
1020 return PCI_SLOT(((struct pci_dev
*)osh
->pdev
)->devfn
);
1024 /* return domain # for the pci device pointed by osh->pdev */
1026 osl_pcie_domain(osl_t
*osh
)
1028 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
1030 return pci_domain_nr(((struct pci_dev
*)osh
->pdev
)->bus
);
1033 /* return bus # for the pci device pointed by osh->pdev */
1035 osl_pcie_bus(osl_t
*osh
)
1037 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
1039 return ((struct pci_dev
*)osh
->pdev
)->bus
->number
;
1042 /* return the pci device pointed by osh->pdev */
1044 osl_pci_device(osl_t
*osh
)
1046 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
1052 osl_pcmcia_attr(osl_t
*osh
, uint offset
, char *buf
, int size
, bool write
)
1057 osl_pcmcia_read_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
1059 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, FALSE
);
1063 osl_pcmcia_write_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
1065 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, TRUE
);
1069 osl_malloc(osl_t
*osh
, uint size
)
1074 /* only ASSERT if osh is defined */
1076 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
1077 #ifdef CONFIG_DHD_USE_STATIC_BUF
1080 unsigned long irq_flags
;
1082 if ((size
>= PAGE_SIZE
)&&(size
<= STATIC_BUF_SIZE
))
1084 spin_lock_irqsave(&bcm_static_buf
->static_lock
, irq_flags
);
1086 for (i
= 0; i
< STATIC_BUF_MAX_NUM
; i
++)
1088 if (bcm_static_buf
->buf_use
[i
] == 0)
1092 if (i
== STATIC_BUF_MAX_NUM
)
1094 spin_unlock_irqrestore(&bcm_static_buf
->static_lock
, irq_flags
);
1095 printk("all static buff in use!\n");
1099 bcm_static_buf
->buf_use
[i
] = 1;
1100 spin_unlock_irqrestore(&bcm_static_buf
->static_lock
, irq_flags
);
1102 bzero(bcm_static_buf
->buf_ptr
+STATIC_BUF_SIZE
*i
, size
);
1104 atomic_add(size
, &osh
->cmn
->malloced
);
1106 return ((void *)(bcm_static_buf
->buf_ptr
+STATIC_BUF_SIZE
*i
));
1110 #endif /* CONFIG_DHD_USE_STATIC_BUF */
1112 flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
1113 if ((addr
= kmalloc(size
, flags
)) == NULL
) {
1118 if (osh
&& osh
->cmn
)
1119 atomic_add(size
, &osh
->cmn
->malloced
);
1125 osl_mallocz(osl_t
*osh
, uint size
)
1129 ptr
= osl_malloc(osh
, size
);
1139 osl_mfree(osl_t
*osh
, void *addr
, uint size
)
1141 #ifdef CONFIG_DHD_USE_STATIC_BUF
1142 unsigned long flags
;
1146 if ((addr
> (void *)bcm_static_buf
) && ((unsigned char *)addr
1147 <= ((unsigned char *)bcm_static_buf
+ STATIC_BUF_TOTAL_LEN
)))
1151 buf_idx
= ((unsigned char *)addr
- bcm_static_buf
->buf_ptr
)/STATIC_BUF_SIZE
;
1153 spin_lock_irqsave(&bcm_static_buf
->static_lock
, flags
);
1154 bcm_static_buf
->buf_use
[buf_idx
] = 0;
1155 spin_unlock_irqrestore(&bcm_static_buf
->static_lock
, flags
);
1157 if (osh
&& osh
->cmn
) {
1158 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
1159 atomic_sub(size
, &osh
->cmn
->malloced
);
1164 #endif /* CONFIG_DHD_USE_STATIC_BUF */
1165 if (osh
&& osh
->cmn
) {
1166 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
1168 ASSERT(size
<= osl_malloced(osh
));
1170 atomic_sub(size
, &osh
->cmn
->malloced
);
1176 osl_check_memleak(osl_t
*osh
)
1178 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1179 if (atomic_read(&osh
->cmn
->refcount
) == 1)
1180 return (atomic_read(&osh
->cmn
->malloced
));
1186 osl_malloced(osl_t
*osh
)
1188 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1189 return (atomic_read(&osh
->cmn
->malloced
));
1193 osl_malloc_failed(osl_t
*osh
)
1195 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1196 return (osh
->failed
);
1201 osl_dma_consistent_align(void)
1207 osl_dma_alloc_consistent(osl_t
*osh
, uint size
, uint16 align_bits
, uint
*alloced
, dmaaddr_t
*pap
)
1210 uint16 align
= (1 << align_bits
);
1211 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1213 if (!ISALIGNED(DMA_CONSISTENT_ALIGN
, align
))
1217 #ifndef BCM_SECURE_DMA
1218 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
1219 va
= kmalloc(size
, GFP_ATOMIC
| __GFP_ZERO
);
1221 *pap
= (ulong
)__virt_to_phys((ulong
)va
);
1225 struct pci_dev
*hwdev
= osh
->pdev
;
1227 #ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
1230 flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
1231 #endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
1232 va
= dma_alloc_coherent(&hwdev
->dev
, size
, &pap_lin
, flags
);
1233 *pap
= (dmaaddr_t
)pap_lin
;
1235 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
1237 va
= osl_sec_dma_alloc_consistent(osh
, size
, align_bits
, pap
);
1238 #endif /* BCM_SECURE_DMA */
1243 osl_dma_free_consistent(osl_t
*osh
, void *va
, uint size
, dmaaddr_t pa
)
1245 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1247 #ifndef BCM_SECURE_DMA
1248 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
1251 pci_free_consistent(osh
->pdev
, size
, va
, (dma_addr_t
)pa
);
1252 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
1254 osl_sec_dma_free_consistent(osh
, va
, size
, pa
);
1255 #endif /* BCM_SECURE_DMA */
1258 dmaaddr_t BCMFASTPATH
1259 osl_dma_map(osl_t
*osh
, void *va
, uint size
, int direction
, void *p
, hnddma_seg_map_t
*dmah
)
1263 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1264 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
1269 return (pci_map_single(osh
->pdev
, va
, size
, dir
));
1273 osl_dma_unmap(osl_t
*osh
, uint pa
, uint size
, int direction
)
1277 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1280 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
1281 pci_unmap_single(osh
->pdev
, (uint32
)pa
, size
, dir
);
1284 /* OSL function for CPU relax */
1285 inline void BCMFASTPATH
1292 inline void BCMFASTPATH
1293 osl_cache_flush(void *va
, uint size
)
1295 unsigned long l
= ROUNDDN((unsigned long)va
, L1_CACHE_BYTES
);
1296 unsigned long e
= ROUNDUP((unsigned long)(va
+size
), L1_CACHE_BYTES
);
1299 flush_dcache_line(l
); /* Hit_Writeback_Inv_D */
1300 l
+= L1_CACHE_BYTES
; /* next cache line base */
1304 inline void BCMFASTPATH
1305 osl_cache_inv(void *va
, uint size
)
1307 unsigned long l
= ROUNDDN((unsigned long)va
, L1_CACHE_BYTES
);
1308 unsigned long e
= ROUNDUP((unsigned long)(va
+size
), L1_CACHE_BYTES
);
1311 invalidate_dcache_line(l
); /* Hit_Invalidate_D */
1312 l
+= L1_CACHE_BYTES
; /* next cache line base */
1316 inline void osl_prefetch(const void *ptr
)
1318 __asm__
__volatile__(".set mips4\npref %0,(%1)\n.set mips0\n"::"i" (Pref_Load
), "r" (ptr
));
1321 #elif (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
1323 inline void BCMFASTPATH
1324 osl_cache_flush(void *va
, uint size
)
1328 dma_sync_single_for_device(OSH_NULL
, virt_to_dma(OSH_NULL
, va
), size
, DMA_TX
);
1331 inline void BCMFASTPATH
1332 osl_cache_inv(void *va
, uint size
)
1335 dma_sync_single_for_cpu(OSH_NULL
, virt_to_dma(OSH_NULL
, va
), size
, DMA_RX
);
/* ARM cache-preload hint ("pld") for the line containing ptr. */
inline void osl_prefetch(const void *ptr)
{
	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
}
/* Report whether this architecture keeps DMA coherent with the caches
 * (this build: it does not).
 */
int osl_arch_is_coherent(void)
{
	return 0;
}
/* ACP (Accelerator Coherency Port) workaround is disabled on this build. */
inline int osl_acp_war_enab(void)
{
	return 0;
}
1358 osl_delay(uint usec
)
1363 d
= MIN(usec
, 1000);
1372 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1374 usleep_range(ms
*1000, ms
*1000 + 1000);
/* Clone a packet.
 * The pkttag contents are NOT cloned.
 * NOTE(review): extraction dropped several lines of this function
 * (local declarations, braces, #else, return statements); only the
 * visible statements are reproduced below.
 */
osl_pktdup(osl_t *osh, void *skb)
	/* chained (PKTC) packets must not be duplicated as a unit */
	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 */
	PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	/* newer kernels: copy header AND data area */
	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
	/* older kernels: header-only clone (data remains shared) */
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)

	/* Clear PKTC context */
	PKTSETCLINK(p, NULL);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->cmn->pktalloced);
1424 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1428 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1432 osl_pktalloced(osl_t
*osh
)
1434 if (atomic_read(&osh
->cmn
->refcount
) == 1)
1435 return (atomic_read(&osh
->cmn
->pktalloced
));
1445 get_random_bytes(&rand
, sizeof(rand
));
1450 /* Linux Kernel: File Operations: start */
1452 osl_os_open_image(char *filename
)
1456 fp
= filp_open(filename
, O_RDONLY
, 0);
1458 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1460 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1470 osl_os_get_image_block(char *buf
, int len
, void *image
)
1472 struct file
*fp
= (struct file
*)image
;
1478 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, len
);
1486 osl_os_close_image(void *image
)
1489 filp_close((struct file
*)image
, NULL
);
/*
 * Return the size in bytes of an open image, preserving the caller's
 * current file offset across the measurement.  0 for a NULL handle.
 */
int
osl_os_image_size(void *image)
{
	int len = 0, curroffset;

	if (image) {
		/* store the current offset */
		curroffset = generic_file_llseek(image, 0, 1);	/* SEEK_CUR */
		/* goto end of file to get length */
		len = generic_file_llseek(image, 0, 2);		/* SEEK_END */
		/* restore back the offset */
		generic_file_llseek(image, curroffset, 0);	/* SEEK_SET */
	}
	return len;
}

/* Linux Kernel: File Operations: end */
#if (defined(STB) && defined(__arm__))
/*
 * Read a PCIe register of the given width into *v.  While the ACP
 * workaround is enabled, PCI accesses are serialized against the PL310
 * L2-cache register lock to avoid the controller erratum.
 */
inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
{
	unsigned long flags = 0;
	int pci_access = 0;

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
		pci_access = 1;

	if (pci_access && ACP_WAR_ENAB())
		spin_lock_irqsave(&l2x0_reg_lock, flags);

	switch (size) {
	case sizeof(uint8):
		*(uint8 *)v = readb((volatile uint8 *)(addr));
		break;
	case sizeof(uint16):
		*(uint16 *)v = readw((volatile uint16 *)(addr));
		break;
	case sizeof(uint32):
		*(uint32 *)v = readl((volatile uint32 *)(addr));
		break;
	case sizeof(uint64):
		/* no 64-bit MMIO accessor used here; direct dereference as in original */
		*(uint64 *)v = *((volatile uint64 *)(addr));
		break;
	}

	if (pci_access && ACP_WAR_ENAB())
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif /* STB && __arm__ */
1542 /* APIs to set/get specific quirks in OSL layer */
1544 osl_flag_set(osl_t
*osh
, uint32 mask
)
1550 inline bool BCMFASTPATH
1554 osl_is_flag_set(osl_t
*osh
, uint32 mask
)
1556 return (osh
->flags
& mask
);
1559 #ifdef BCM_SECURE_DMA
1561 osl_sec_dma_ioremap(osl_t
*osh
, struct page
*page
, size_t size
, bool iscache
, bool isdecr
)
1568 size
= PAGE_ALIGN(size
);
1569 order
= get_order(size
);
1571 map
= kmalloc(sizeof(struct page
*) << order
, GFP_ATOMIC
);
1576 for (i
= 0; i
< (size
>> PAGE_SHIFT
); i
++)
1580 addr
= vmap(map
, size
>> PAGE_SHIFT
, VM_MAP
, __pgprot(PAGE_KERNEL
));
1582 osh
->contig_delta_va_pa
= (phys_addr_t
)(addr
- page_to_phys(page
));
1587 #if defined(__ARM_ARCH_7A__)
1588 addr
= vmap(map
, size
>> PAGE_SHIFT
, VM_MAP
,
1589 pgprot_noncached(__pgprot(PAGE_KERNEL
)));
1592 osh
->contig_delta_va_pa
= (phys_addr_t
)(addr
- page_to_phys(page
));
1597 return (void *)addr
;
1601 osl_sec_dma_iounmap(osl_t
*osh
, void *contig_base_va
, size_t size
)
1603 vunmap(contig_base_va
);
1607 osl_sec_dma_init_elem_mem_block(osl_t
*osh
, size_t mbsize
, int max
, sec_mem_elem_t
**list
)
1611 sec_mem_elem_t
*sec_mem_elem
;
1613 if ((sec_mem_elem
= kmalloc(sizeof(sec_mem_elem_t
)*(max
), GFP_ATOMIC
)) != NULL
) {
1615 *list
= sec_mem_elem
;
1616 bzero(sec_mem_elem
, sizeof(sec_mem_elem_t
)*(max
));
1617 for (i
= 0; i
< max
-1; i
++) {
1618 sec_mem_elem
->next
= (sec_mem_elem
+ 1);
1619 sec_mem_elem
->size
= mbsize
;
1620 sec_mem_elem
->pa_cma
= osh
->contig_base_alloc
;
1621 sec_mem_elem
->vac
= osh
->contig_base_alloc_va
;
1623 sec_mem_elem
->pa_cma_page
= phys_to_page(sec_mem_elem
->pa_cma
);
1624 osh
->contig_base_alloc
+= mbsize
;
1625 osh
->contig_base_alloc_va
= ((uint8
*)osh
->contig_base_alloc_va
+ mbsize
);
1627 sec_mem_elem
= sec_mem_elem
+ 1;
1629 sec_mem_elem
->next
= NULL
;
1630 sec_mem_elem
->size
= mbsize
;
1631 sec_mem_elem
->pa_cma
= osh
->contig_base_alloc
;
1632 sec_mem_elem
->vac
= osh
->contig_base_alloc_va
;
1634 sec_mem_elem
->pa_cma_page
= phys_to_page(sec_mem_elem
->pa_cma
);
1635 osh
->contig_base_alloc
+= mbsize
;
1636 osh
->contig_base_alloc_va
= ((uint8
*)osh
->contig_base_alloc_va
+ mbsize
);
1639 printf("%s sec mem elem kmalloc failed\n", __FUNCTION__
);
1647 osl_sec_dma_deinit_elem_mem_block(osl_t
*osh
, size_t mbsize
, int max
, void *sec_list_base
)
1650 kfree(sec_list_base
);
1653 static sec_mem_elem_t
* BCMFASTPATH
1654 osl_sec_dma_alloc_mem_elem(osl_t
*osh
, void *va
, uint size
, int direction
,
1655 struct sec_cma_info
*ptr_cma_info
, uint offset
)
1657 sec_mem_elem_t
*sec_mem_elem
= NULL
;
1659 ASSERT(osh
->sec_list_4096
);
1660 sec_mem_elem
= osh
->sec_list_4096
;
1661 osh
->sec_list_4096
= sec_mem_elem
->next
;
1663 sec_mem_elem
->next
= NULL
;
1665 if (ptr_cma_info
->sec_alloc_list_tail
) {
1666 ptr_cma_info
->sec_alloc_list_tail
->next
= sec_mem_elem
;
1667 ptr_cma_info
->sec_alloc_list_tail
= sec_mem_elem
;
1670 /* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
1671 ASSERT(ptr_cma_info
->sec_alloc_list
== NULL
);
1672 ptr_cma_info
->sec_alloc_list
= sec_mem_elem
;
1673 ptr_cma_info
->sec_alloc_list_tail
= sec_mem_elem
;
1675 return sec_mem_elem
;
1678 static void BCMFASTPATH
1679 osl_sec_dma_free_mem_elem(osl_t
*osh
, sec_mem_elem_t
*sec_mem_elem
)
1681 sec_mem_elem
->dma_handle
= 0x0;
1682 sec_mem_elem
->va
= NULL
;
1683 sec_mem_elem
->next
= osh
->sec_list_4096
;
1684 osh
->sec_list_4096
= sec_mem_elem
;
1687 static sec_mem_elem_t
* BCMFASTPATH
1688 osl_sec_dma_find_rem_elem(osl_t
*osh
, struct sec_cma_info
*ptr_cma_info
, dma_addr_t dma_handle
)
1690 sec_mem_elem_t
*sec_mem_elem
= ptr_cma_info
->sec_alloc_list
;
1691 sec_mem_elem_t
*sec_prv_elem
= ptr_cma_info
->sec_alloc_list
;
1693 if (!sec_mem_elem
) {
1694 printk("osl_sec_dma_find_rem_elem ptr_cma_info->sec_alloc_list is NULL \n");
1698 if (sec_mem_elem
->dma_handle
== dma_handle
) {
1700 ptr_cma_info
->sec_alloc_list
= sec_mem_elem
->next
;
1702 if (sec_mem_elem
== ptr_cma_info
->sec_alloc_list_tail
) {
1703 ptr_cma_info
->sec_alloc_list_tail
= NULL
;
1704 ASSERT(ptr_cma_info
->sec_alloc_list
== NULL
);
1707 return sec_mem_elem
;
1709 sec_mem_elem
= sec_mem_elem
->next
;
1711 while (sec_mem_elem
!= NULL
) {
1713 if (sec_mem_elem
->dma_handle
== dma_handle
) {
1715 sec_prv_elem
->next
= sec_mem_elem
->next
;
1716 if (sec_mem_elem
== ptr_cma_info
->sec_alloc_list_tail
)
1717 ptr_cma_info
->sec_alloc_list_tail
= sec_prv_elem
;
1719 return sec_mem_elem
;
1721 sec_prv_elem
= sec_mem_elem
;
1722 sec_mem_elem
= sec_mem_elem
->next
;
1727 static sec_mem_elem_t
*
1728 osl_sec_dma_rem_first_elem(osl_t
*osh
, struct sec_cma_info
*ptr_cma_info
)
1730 sec_mem_elem_t
*sec_mem_elem
= ptr_cma_info
->sec_alloc_list
;
1734 ptr_cma_info
->sec_alloc_list
= sec_mem_elem
->next
;
1736 if (ptr_cma_info
->sec_alloc_list
== NULL
)
1737 ptr_cma_info
->sec_alloc_list_tail
= NULL
;
1739 return sec_mem_elem
;
1745 static void * BCMFASTPATH
1746 osl_sec_dma_last_elem(osl_t
*osh
, struct sec_cma_info
*ptr_cma_info
)
1748 return ptr_cma_info
->sec_alloc_list_tail
;
1751 dma_addr_t BCMFASTPATH
1752 osl_sec_dma_map_txmeta(osl_t
*osh
, void *va
, uint size
, int direction
, void *p
,
1753 hnddma_seg_map_t
*dmah
, void *ptr_cma_info
)
1755 sec_mem_elem_t
*sec_mem_elem
;
1756 struct page
*pa_cma_page
;
1758 void *vaorig
= ((uint8
*)va
+ size
);
1759 dma_addr_t dma_handle
= 0x0;
1760 /* packet will be the one added with osl_sec_dma_map() just before this call */
1762 sec_mem_elem
= osl_sec_dma_last_elem(osh
, ptr_cma_info
);
1764 if (sec_mem_elem
&& sec_mem_elem
->va
== vaorig
) {
1766 pa_cma_page
= phys_to_page(sec_mem_elem
->pa_cma
);
1767 loffset
= sec_mem_elem
->pa_cma
-(sec_mem_elem
->pa_cma
& ~(PAGE_SIZE
-1));
1769 dma_handle
= dma_map_page(OSH_NULL
, pa_cma_page
, loffset
, size
,
1770 (direction
== DMA_TX
? DMA_TO_DEVICE
:DMA_FROM_DEVICE
));
1773 printf("%s: error orig va not found va = 0x%p \n",
1774 __FUNCTION__
, vaorig
);
/*
 * Map a buffer for secure DMA: allocate a bounce element, copy TX data
 * into the secure region, and DMA-map the corresponding page.
 * NOTE(review): extraction dropped lines from this function (declarations
 * of buflen/loffset, braces, #else, return); only visible statements
 * are reproduced below.
 */
dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;
	dma_addr_t dma_handle = 0x0;

	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
	/* reserve a bounce element and queue it on the caller's list */
	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);

	sec_mem_elem->va = va;
	sec_mem_elem->direction = direction;
	pa_cma_page = sec_mem_elem->pa_cma_page;

	/* offset of the bounce buffer within its page */
	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
	 * pa_cma_kmap_va += loffset;
	 */

	pa_cma_kmap_va = sec_mem_elem->vac;
	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);

	if (direction == DMA_TX) {
		/* TX: stage the payload into the secure bounce buffer */
		memcpy((uint8 *)pa_cma_kmap_va+offset, va, size);
		dmah->origsize = buflen;
	if ((p != NULL) && (dmah != NULL)) {
		dmah->origsize = buflen;

	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));

	/* report the single mapped segment back through dmah */
	dmah->segs[0].addr = dma_handle;
	dmah->segs[0].length = buflen;

	sec_mem_elem->dma_handle = dma_handle;
	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
1835 dma_addr_t BCMFASTPATH
1836 osl_sec_dma_dd_map(osl_t
*osh
, void *va
, uint size
, int direction
, void *p
, hnddma_seg_map_t
*map
)
1839 struct page
*pa_cma_page
;
1841 dma_addr_t dma_handle
= 0x0;
1844 pa_cma
= (phys_addr_t
)(va
- osh
->contig_delta_va_pa
);
1845 pa_cma_page
= phys_to_page(pa_cma
);
1846 loffset
= pa_cma
-(pa_cma
& ~(PAGE_SIZE
-1));
1848 dma_handle
= dma_map_page(OSH_NULL
, pa_cma_page
, loffset
, size
,
1849 (direction
== DMA_TX
? DMA_TO_DEVICE
:DMA_FROM_DEVICE
));
/*
 * Unmap a buffer previously mapped with osl_sec_dma_map(): look the
 * bounce element up by DMA handle, for RX copy the received data back
 * into the caller's buffer, then recycle the element.
 * NOTE(review): extraction dropped lines from this function (local
 * declarations of buflen/pa_cma/va/read_count, braces, an inner guard
 * on the RX path); only visible statements are reproduced below.
 */
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;

	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	ASSERT(sec_mem_elem);

	va = sec_mem_elem->va;
	/* step back to the caller's original buffer start */
	va = (uint8 *)va - offset;
	pa_cma = sec_mem_elem->pa_cma;

	pa_cma_page = sec_mem_elem->pa_cma_page;

	if (direction == DMA_RX) {

		/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
		 * pa_cma_kmap_va += loffset;
		 */
		pa_cma_kmap_va = sec_mem_elem->vac;

		dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
		/* copy the received payload out of the secure bounce buffer */
		memcpy(va, pa_cma_kmap_va, size);
		/* kunmap_atomic(pa_cma_kmap_va); */

	/* TX path: the mapping covered the payload plus the leading offset */
	dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
1900 osl_sec_dma_unmap_all(osl_t
*osh
, void *ptr_cma_info
)
1903 sec_mem_elem_t
*sec_mem_elem
;
1905 sec_mem_elem
= osl_sec_dma_rem_first_elem(osh
, ptr_cma_info
);
1907 while (sec_mem_elem
!= NULL
) {
1909 dma_unmap_page(OSH_NULL
, sec_mem_elem
->pa_cma
, sec_mem_elem
->size
,
1910 sec_mem_elem
->direction
== DMA_TX
? DMA_TO_DEVICE
: DMA_FROM_DEVICE
);
1911 osl_sec_dma_free_mem_elem(osh
, sec_mem_elem
);
1913 sec_mem_elem
= osl_sec_dma_rem_first_elem(osh
, ptr_cma_info
);
1918 osl_sec_dma_init_consistent(osl_t
*osh
)
1921 void *temp_va
= osh
->contig_base_alloc_coherent_va
;
1922 phys_addr_t temp_pa
= osh
->contig_base_alloc_coherent
;
1924 for (i
= 0; i
< SEC_CMA_COHERENT_MAX
; i
++) {
1925 osh
->sec_cma_coherent
[i
].avail
= TRUE
;
1926 osh
->sec_cma_coherent
[i
].va
= temp_va
;
1927 osh
->sec_cma_coherent
[i
].pa
= temp_pa
;
1928 temp_va
+= SEC_CMA_COHERENT_BLK
;
1929 temp_pa
+= SEC_CMA_COHERENT_BLK
;
1934 osl_sec_dma_alloc_consistent(osl_t
*osh
, uint size
, uint16 align_bits
, ulong
*pap
)
1937 void *temp_va
= NULL
;
1941 if (size
> SEC_CMA_COHERENT_BLK
) {
1942 printf("%s unsupported size\n", __FUNCTION__
);
1946 for (i
= 0; i
< SEC_CMA_COHERENT_MAX
; i
++) {
1947 if (osh
->sec_cma_coherent
[i
].avail
== TRUE
) {
1948 temp_va
= osh
->sec_cma_coherent
[i
].va
;
1949 temp_pa
= osh
->sec_cma_coherent
[i
].pa
;
1950 osh
->sec_cma_coherent
[i
].avail
= FALSE
;
1955 if (i
== SEC_CMA_COHERENT_MAX
)
1956 printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__
,
1957 temp_va
, (ulong
)temp_pa
, size
);
1959 *pap
= (unsigned long)temp_pa
;
1964 osl_sec_dma_free_consistent(osl_t
*osh
, void *va
, uint size
, dmaaddr_t pa
)
1968 for (i
= 0; i
< SEC_CMA_COHERENT_MAX
; i
++) {
1969 if (osh
->sec_cma_coherent
[i
].va
== va
) {
1970 osh
->sec_cma_coherent
[i
].avail
= TRUE
;
1974 if (i
== SEC_CMA_COHERENT_MAX
)
1975 printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__
,
1976 va
, (ulong
)pa
, size
);
1979 #endif /* BCM_SECURE_DMA */
1981 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
1982 #include <linux/kallsyms.h>
1983 #include <net/sock.h>
1985 osl_pkt_orphan_partial(struct sk_buff
*skb
)
1988 static void *p_tcp_wfree
= NULL
;
1990 if (!skb
->destructor
|| skb
->destructor
== sock_wfree
)
1993 if (unlikely(!p_tcp_wfree
)) {
1994 char sym
[KSYM_SYMBOL_LEN
];
1995 sprint_symbol(sym
, (unsigned long)skb
->destructor
);
1997 if (!strcmp(sym
, "tcp_wfree"))
1998 p_tcp_wfree
= skb
->destructor
;
2003 if (unlikely(skb
->destructor
!= p_tcp_wfree
|| !skb
->sk
))
2006 /* abstract a certain portion of skb truesize from the socket
2007 * sk_wmem_alloc to allow more skb can be allocated for this
2008 * socket for better cusion meeting WiFi device requirement
2010 fraction
= skb
->truesize
* (TSQ_MULTIPLIER
- 1) / TSQ_MULTIPLIER
;
2011 skb
->truesize
-= fraction
;
2012 atomic_sub(fraction
, &skb
->sk
->sk_wmem_alloc
);
2014 #endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */