/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2018, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.c 680580 2017-01-20 11:49:58Z $
 */
#include <bcmendian.h>

#if !defined(STBLINUX)
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */

#include <linux/random.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#endif /* BCM_SECURE_DMA */

#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;

#ifdef BCM_OBJECT_TRACE
#endif /* BCM_OBJECT_TRACE */
#define PCI_CFG_RETRY		10

#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
#define DUMPBUFSZ		1024

/* dependency check */
#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF is supported on PCIE targets only"
#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
#ifdef CONFIG_DHD_USE_STATIC_BUF
#ifdef DHD_USE_STATIC_CTRLBUF
#define DHD_SKB_1PAGE_BUFSIZE	(PAGE_SIZE*1)
#define DHD_SKB_2PAGE_BUFSIZE	(PAGE_SIZE*2)
#define DHD_SKB_4PAGE_BUFSIZE	(PAGE_SIZE*4)

#define PREALLOC_FREE_MAGIC	0xFEDC
#define PREALLOC_USED_MAGIC	0xFCDE
#else
#define DHD_SKB_HDRSIZE		336
#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
#endif /* DHD_USE_STATIC_CTRLBUF */

#define STATIC_BUF_MAX_NUM	16
#define STATIC_BUF_SIZE		(PAGE_SIZE*2)
#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
typedef struct bcm_static_buf {
	spinlock_t static_lock;
	unsigned char *buf_ptr;
	unsigned char buf_use[STATIC_BUF_MAX_NUM];
} bcm_static_buf_t;

static bcm_static_buf_t *bcm_static_buf = 0;
#ifdef DHD_USE_STATIC_CTRLBUF
#define STATIC_PKT_4PAGE_NUM	0
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
#elif defined(ENHANCED_STATIC_BUF)
#define STATIC_PKT_4PAGE_NUM	1
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
#else
#define STATIC_PKT_4PAGE_NUM	0
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
#endif /* DHD_USE_STATIC_CTRLBUF */

#ifdef DHD_USE_STATIC_CTRLBUF
#define STATIC_PKT_1PAGE_NUM	0
#define STATIC_PKT_2PAGE_NUM	128
#else
#define STATIC_PKT_1PAGE_NUM	8
#define STATIC_PKT_2PAGE_NUM	8
#endif /* DHD_USE_STATIC_CTRLBUF */

#define STATIC_PKT_1_2PAGE_NUM	\
	((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
#define STATIC_PKT_MAX_NUM	\
	((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
typedef struct bcm_static_pkt {
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
	unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
	spinlock_t osl_pkt_lock;
	uint32 last_allocated_index;
#else
	struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
#ifdef ENHANCED_STATIC_BUF
	struct sk_buff *skb_16k;
#endif /* ENHANCED_STATIC_BUF */
	struct semaphore osl_pkt_sem;
#endif /* DHD_USE_STATIC_CTRLBUF */
	unsigned char pkt_use[STATIC_PKT_MAX_NUM];
} bcm_static_pkt_t;

static bcm_static_pkt_t *bcm_static_skb = 0;

void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */
typedef struct bcm_mem_link {
	struct bcm_mem_link *prev;
	struct bcm_mem_link *next;
	char	file[BCM_MEM_FILENAME_LEN];
} bcm_mem_link_t;

struct osl_cmn_info {
	atomic_t pktalloced;	/* Number of allocated packet buffers */
	spinlock_t dbgmem_lock;
	bcm_mem_link_t *dbgmem_list;
	bcm_mem_link_t *dbgvmem_list;
	spinlock_t pktalloc_lock;
	atomic_t refcount;	/* Number of references to this shared structure. */
};
typedef struct osl_cmn_info osl_cmn_t;
	uint32 flags;		/* If specific cases to be handled in the OSL */
	osl_cmn_t *cmn;		/* Common OSL related data shared between two OSH's */

#ifdef BCM_SECURE_DMA
	struct sec_mem_elem *sec_list_512;
	struct sec_mem_elem *sec_list_base_512;
	struct sec_mem_elem *sec_list_2048;
	struct sec_mem_elem *sec_list_base_2048;
	struct sec_mem_elem *sec_list_4096;
	struct sec_mem_elem *sec_list_base_4096;
	phys_addr_t contig_base;
	void *contig_base_va;
	phys_addr_t contig_base_alloc;
	void *contig_base_alloc_va;
	phys_addr_t contig_base_alloc_coherent;
	void *contig_base_alloc_coherent_va;
	void *contig_base_coherent_va;
	void *contig_delta_va_pa;
	} sec_cma_coherent[SEC_CMA_COHERENT_MAX];
#endif /* BCM_SECURE_DMA */
#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
	bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
	ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */
#ifdef BCM_OBJECT_TRACE
/* don't clear the first 4 bytes; they hold the pkt sn */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
#else
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
#endif /* BCM_OBJECT_TRACE */
/* PCMCIA attribute space access macros */

uint32 g_assert_type = 0; /* By default, kernel panic */
module_param(g_assert_type, int, 0);

#ifdef BCM_SECURE_DMA
#define	SECDMA_MODULE_PARAMS	0
#define	SECDMA_EXT_FILE	1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */
static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */
	-EINVAL,		/* BCME_NOT_GC */
	-EINVAL,		/* BCME_PRS_REQ_FAILED */
	-EINVAL,		/* BCME_NO_P2P_SE */
	-EINVAL,		/* BCME_NOA_PND */
	-EINVAL,		/* BCME_FRAG_Q_FAILED */
	-EINVAL,		/* BCME_GET_AF_FAILED */
	-EINVAL,		/* BCME_MSCH_NOTREADY */

/* When a new error code is added to bcmutils.h, add the OS-specific
 * error translation here as well.
 */
/* check if BCME_LAST changed since the last time this function was updated */
#error "You need to add an OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;

	/* Array bounds covered by ASSERT in osl_attach */
	return linuxbcmerrormap[-bcmerror];
}
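/*
 * Illustrative sketch (not part of the original file): a caller that gets a
 * BCME_xxx status back from the firmware/bus layer can translate it into a
 * Linux errno with osl_error() before returning it to the kernel. The helper
 * name below is hypothetical.
 */
#if 0
static int example_status_to_errno(int bcme_status)
{
	/* BCME_OK (0) maps to 0; e.g. BCME_NOMEM maps to -ENOMEM */
	return osl_error(bcme_status);
}
#endif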
osl_attach(void *pdev, uint bustype, bool pkttag)
	void **osl_cmn = NULL;
#ifdef BCM_SECURE_DMA

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))

	bzero(osh, sizeof(osl_t));

	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
		bzero(osh->cmn, sizeof(osl_cmn_t));
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));
		spin_lock_init(&(osh->cmn->pktalloc_lock));
	atomic_add(1, &osh->cmn->refcount);

	bcm_object_trace_init();

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA
	if ((secdma_addr != 0) && (secdma_size != 0)) {
		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
			secdma_memsize = secdma_size;
		} else if (secdma_found == 1) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
			secdma_memsize = secdma_size2;
		printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
	}
	else if (stbpriv_init(osh) == 0) {
		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
		} else if (secdma_found == 1) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
		printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_EXT_FILE;
		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");

	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	if (osh->contig_base_alloc_coherent_va == NULL) {
	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	if (osh->contig_base_alloc_va == NULL) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
	osh->contig_base_va = osh->contig_base_alloc_va;

	/*
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
	 * osh->sec_list_base_512 = osh->sec_list_512;
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
	 * osh->sec_list_base_2048 = osh->sec_list_2048;
	 */
	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
	osh->sec_list_base_4096 = osh->sec_list_4096;
#endif /* BCM_SECURE_DMA */
		osh->pub.mmbus = TRUE;
		osh->pub.mmbus = FALSE;
int osl_static_mem_init(osl_t *osh, void *adapter)
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("can not alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
		printk("alloc static buf at %p!\n", bcm_static_buf);

		spin_lock_init(&bcm_static_buf->static_lock);

		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;

#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
	if (!bcm_static_skb && adapter) {
		void *skb_buff_ptr = 0;
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			bcm_static_skb->pkt_use[i] = 0;

#ifdef DHD_USE_STATIC_CTRLBUF
		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
		bcm_static_skb->last_allocated_index = 0;
		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}

void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}
osl_detach(osl_t *osh)
#ifdef BCM_SECURE_DMA
	if (osh->stb_ext_params == SECDMA_EXT_FILE)
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
#endif /* BCM_SECURE_DMA */

	bcm_object_trace_deinit();

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	atomic_sub(1, &osh->cmn->refcount);
	if (atomic_read(&osh->cmn->refcount) == 0) {

int osl_static_mem_deinit(osl_t *osh, void *adapter)
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
	if (bcm_static_skb) {
#endif /* CONFIG_DHD_USE_STATIC_BUF */
/* APIs to set/get specific quirks in OSL layer */
osl_flag_set(osl_t *osh, uint32 mask)

osl_flag_clr(osl_t *osh, uint32 mask)

inline bool BCMFASTPATH
osl_is_flag_set(osl_t *osh, uint32 mask)
	return (osh->flags & mask);

#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))

inline int BCMFASTPATH
osl_arch_is_coherent(void)

inline int BCMFASTPATH
osl_acp_war_enab(void)

inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
	dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TO_DEVICE);

inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);

inline void BCMFASTPATH
osl_prefetch(const void *ptr)
	__asm__ __volatile__("pld\t%0" :: "o"(*(char *)ptr) : "cc");
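/*
 * Illustrative sketch (not part of the original file): on non-coherent ARMv7
 * platforms the usual pattern is to flush a buffer before the device DMAs
 * *from* it and to invalidate it before the CPU reads data the device DMAd
 * *into* it. Buffer and length names below are hypothetical.
 */
#if 0
static void example_cache_maintenance(void *tx_buf, uint tx_len, void *rx_buf, uint rx_len)
{
	osl_cache_flush(tx_buf, tx_len);	/* CPU wrote tx_buf; make it visible to the device */
	/* ... device DMA happens here ... */
	osl_cache_inv(rx_buf, rx_len);		/* device wrote rx_buf; drop stale CPU cache lines */
}
#endif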
/*
 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
 * DDR aliasing into non-ACP address space. Such fwder buffers must be
 * explicitly managed from a coherency perspective.
 */
static inline void BCMFASTPATH
osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)

static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#ifdef DHD_USE_ATOMIC_PKTGET
#endif /* DHD_USE_ATOMIC_PKTGET */
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)

/*
 * Allocate and add an object to packet pool.
 */
osl_ctfpool_add(osl_t *osh)
	if ((osh == NULL) || (osh->ctfpool == NULL))

	CTFPOOL_LOCK(osh->ctfpool, flags);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Allocate a new skb and add it to the ctfpool */
	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
			osh->ctfpool->obj_size);
		CTFPOOL_UNLOCK(osh->ctfpool, flags);

	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	/* If ctfpool's osh is a fwder osh, reset the fwder buf */
	osl_fwderbuf_reset(osh->ctfpool->osh, skb);

	CTFPOOL_UNLOCK(osh->ctfpool, flags);
/*
 * Add new objects to the pool.
 */
osl_ctfpool_replenish(osl_t *osh, uint thresh)
	if ((osh == NULL) || (osh->ctfpool == NULL))

	/* Do nothing if no refills are required */
	while ((osh->ctfpool->refills > 0) && (thresh--)) {
		osl_ctfpool_add(osh);
		osh->ctfpool->refills--;

/*
 * Initialize the packet pool with specified number of objects.
 */
osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
	ASSERT(osh->ctfpool);

	osh->ctfpool->osh = osh;

	osh->ctfpool->max_obj = numobj;
	osh->ctfpool->obj_size = size;

	spin_lock_init(&osh->ctfpool->lock);

		if (!osl_ctfpool_add(osh))
		osh->ctfpool->fast_frees--;
/*
 * Cleanup the packet pool objects.
 */
osl_ctfpool_cleanup(osl_t *osh)
	struct sk_buff *skb, *nskb;

	if ((osh == NULL) || (osh->ctfpool == NULL))

	CTFPOOL_LOCK(osh->ctfpool, flags);

	skb = osh->ctfpool->head;

	while (skb != NULL) {
		osh->ctfpool->curr_obj--;

	ASSERT(osh->ctfpool->curr_obj == 0);
	osh->ctfpool->head = NULL;
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

osl_ctfpool_stats(osl_t *osh, void *b)
	struct bcmstrbuf *bb;

	if ((osh == NULL) || (osh->ctfpool == NULL))

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
	if (bcm_static_skb) {
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	ASSERT((osh != NULL) && (bb != NULL));

	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
		osh->ctfpool->max_obj, osh->ctfpool->obj_size,
		osh->ctfpool->curr_obj, osh->ctfpool->refills);
	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
		osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
		osh->ctfpool->slow_allocs);
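/*
 * Illustrative sketch (not in the original source): a typical fast-pool
 * lifecycle as suggested by the ctfpool APIs above. The pool sizing values
 * are hypothetical; error handling is omitted.
 */
#if 0
static void example_ctfpool_lifecycle(osl_t *osh)
{
	osl_ctfpool_init(osh, 512, 2048);	/* pre-allocate 512 skbs of 2 KB each */
	osl_ctfpool_replenish(osh, 16);		/* top the pool up by at most 16 objects */
	osl_ctfpool_cleanup(osh);		/* free every pooled skb on teardown */
}
#endif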
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;
		CTFPOOL_UNLOCK(osh->ctfpool, flags);

	if (len > osh->ctfpool->obj_size) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);

	ASSERT(len <= osh->ctfpool->obj_size);

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Init skb struct */
	skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
	skb->data = skb->head + NET_SKB_PAD;
	skb->tail = skb->head + NET_SKB_PAD;
#else
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;
#endif /* __ARM_ARCH_7A__ */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	atomic_set(&skb->users, 1);

	PKTSETCLINK(skb, NULL);

	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
/* Convert a driver packet to a native (OS) packet.
 * In the process, the packettag is zeroed out before sending up:
 * IP code depends on skb->cb being set up correctly with various options,
 * which in our case means it should be 0.
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
	struct sk_buff *nskb;

		OSL_PKTTAG_CLEAR(pkt);

	/* Decrement the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);

	return (struct sk_buff *)pkt;

/* Convert a native (OS) packet to a driver packet.
 * In the process, the native packet is destroyed without copying,
 * and the packettag is zeroed out.
 */
osl_pkt_frmnative(osl_t *osh, void *pkt)
	struct sk_buff *cskb;
	struct sk_buff *nskb;
	unsigned long pktalloced = 0;

		OSL_PKTTAG_CLEAR(pkt);

	/* walk the PKTCLINK() list */
	for (cskb = (struct sk_buff *)pkt;
		cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {

		/* walk the pkt buffer list */
		for (nskb = cskb; nskb; nskb = nskb->next) {

			/* Increment the packet counter */

			/* clean the 'prev' pointer:
			 * kernel 3.18 leaves skb->prev pointing at skb itself
			 * to indicate a non-fragmented skb.
			 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */

	/* Increment the packet counter */
	atomic_add(pktalloced, &osh->cmn->pktalloced);
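/*
 * Illustrative sketch (not in the original source): how a bus/driver layer
 * typically pairs the two conversion helpers above. The function and variable
 * names are hypothetical.
 */
#if 0
static void example_pkt_conversion(osl_t *osh, struct sk_buff *skb_from_netdev)
{
	void *pkt;
	struct sk_buff *skb_up;

	/* take ownership of an OS skb: the packet counter goes up, pkttag is cleared */
	pkt = osl_pkt_frmnative(osh, skb_from_netdev);
	/* ... driver/dongle processing ... */
	/* hand it back to the network stack: the packet counter goes down again */
	skb_up = osl_pkt_tonative(osh, pkt);
	netif_rx(skb_up);
}
#endif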
/* Return a new packet. zero out pkttag */
#ifdef BCM_OBJECT_TRACE
osl_pktget(osl_t *osh, uint len, int line, const char *caller)
#else
osl_pktget(osl_t *osh, uint len)
#endif /* BCM_OBJECT_TRACE */
	struct sk_buff *skb;

	if (lmtest != FALSE) {
		get_random_bytes(&num, sizeof(uchar));
		if ((num + 1) <= (256 * lmtest / 100))

	/* Allocate from local pool */
	skb = osl_pktfastget(osh, len);
	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
#else /* CTFPOOL */
	if ((skb = osl_alloc_skb(osh, len))) {
#endif /* CTFPOOL */
		atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */

	return ((void*) skb);
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
#else
	skb->stamp.tv_sec = 0;
#endif

	/* We only need to init the fields that we change */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	OSL_PKTTAG_CLEAR(skb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	skb->destructor = NULL;

	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);

	/* if osh is a fwder osh, reset the fwder buf */
	osl_fwderbuf_reset(ctfpool->osh, skb);

	/* Add object to the ctfpool */
	CTFPOOL_LOCK(ctfpool, flags);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;
	ctfpool->curr_obj++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	CTFPOOL_UNLOCK(ctfpool, flags);
#endif /* CTFPOOL */
/* Free the driver packet. Free the tag if present */
#ifdef BCM_OBJECT_TRACE
osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
osl_pktfree(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in use\n",
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */

#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

		if (PKTISFAST(osh, skb)) {
			if (atomic_read(&skb->users) == 1)
			else if (!atomic_dec_and_test(&skb->users))
			osl_pktfastfree(osh, skb);

			dev_kfree_skb_any(skb);

		atomic_dec(&osh->cmn->pktalloced);
1157 #ifdef CONFIG_DHD_USE_STATIC_BUF
1159 osl_pktget_static(osl_t
*osh
, uint len
)
1162 struct sk_buff
*skb
;
1163 #ifdef DHD_USE_STATIC_CTRLBUF
1164 unsigned long flags
;
1165 #endif /* DHD_USE_STATIC_CTRLBUF */
1167 if (!bcm_static_skb
)
1168 return osl_pktget(osh
, len
);
1170 if (len
> DHD_SKB_MAX_BUFSIZE
) {
1171 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__
, len
);
1172 return osl_pktget(osh
, len
);
1175 #ifdef DHD_USE_STATIC_CTRLBUF
1176 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
1178 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
1180 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
1181 index
= bcm_static_skb
->last_allocated_index
% STATIC_PKT_2PAGE_NUM
;
1182 bcm_static_skb
->last_allocated_index
++;
1183 if (bcm_static_skb
->skb_8k
[index
] &&
1184 bcm_static_skb
->pkt_use
[index
] == 0) {
1189 if ((i
!= STATIC_PKT_2PAGE_NUM
) &&
1190 (index
>= 0) && (index
< STATIC_PKT_2PAGE_NUM
)) {
1191 bcm_static_skb
->pkt_use
[index
] = 1;
1192 skb
= bcm_static_skb
->skb_8k
[index
];
1193 skb
->data
= skb
->head
;
1194 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1195 skb_set_tail_pointer(skb
, NET_SKB_PAD
);
1197 skb
->tail
= skb
->data
+ NET_SKB_PAD
;
1198 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1199 skb
->data
+= NET_SKB_PAD
;
1202 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1203 skb_set_tail_pointer(skb
, len
);
1205 skb
->tail
= skb
->data
+ len
;
1206 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1208 skb
->mac_len
= PREALLOC_USED_MAGIC
;
1209 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
1214 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
1215 printk("%s: all static pkt in use!\n", __FUNCTION__
);
1218 down(&bcm_static_skb
->osl_pkt_sem
);
1220 if (len
<= DHD_SKB_1PAGE_BUFSIZE
) {
1221 for (i
= 0; i
< STATIC_PKT_1PAGE_NUM
; i
++) {
1222 if (bcm_static_skb
->skb_4k
[i
] &&
1223 bcm_static_skb
->pkt_use
[i
] == 0) {
1228 if (i
!= STATIC_PKT_1PAGE_NUM
) {
1229 bcm_static_skb
->pkt_use
[i
] = 1;
1231 skb
= bcm_static_skb
->skb_4k
[i
];
1232 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1233 skb_set_tail_pointer(skb
, len
);
1235 skb
->tail
= skb
->data
+ len
;
1236 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1239 up(&bcm_static_skb
->osl_pkt_sem
);
1244 if (len
<= DHD_SKB_2PAGE_BUFSIZE
) {
1245 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
1246 if (bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
] &&
1247 bcm_static_skb
->pkt_use
[i
] == 0) {
1252 if ((i
>= STATIC_PKT_1PAGE_NUM
) && (i
< STATIC_PKT_1_2PAGE_NUM
)) {
1253 bcm_static_skb
->pkt_use
[i
] = 1;
1254 skb
= bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
];
1255 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1256 skb_set_tail_pointer(skb
, len
);
1258 skb
->tail
= skb
->data
+ len
;
1259 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1262 up(&bcm_static_skb
->osl_pkt_sem
);
1267 #if defined(ENHANCED_STATIC_BUF)
1268 if (bcm_static_skb
->skb_16k
&&
1269 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] == 0) {
1270 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 1;
1272 skb
= bcm_static_skb
->skb_16k
;
1273 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1274 skb_set_tail_pointer(skb
, len
);
1276 skb
->tail
= skb
->data
+ len
;
1277 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1280 up(&bcm_static_skb
->osl_pkt_sem
);
1283 #endif /* ENHANCED_STATIC_BUF */
1285 up(&bcm_static_skb
->osl_pkt_sem
);
1286 printk("%s: all static pkt in use!\n", __FUNCTION__
);
1287 return osl_pktget(osh
, len
);
1288 #endif /* DHD_USE_STATIC_CTRLBUF */
1292 osl_pktfree_static(osl_t
*osh
, void *p
, bool send
)
1295 #ifdef DHD_USE_STATIC_CTRLBUF
1296 struct sk_buff
*skb
= (struct sk_buff
*)p
;
1297 unsigned long flags
;
1298 #endif /* DHD_USE_STATIC_CTRLBUF */
1304 if (!bcm_static_skb
) {
1305 osl_pktfree(osh
, p
, send
);
1309 #ifdef DHD_USE_STATIC_CTRLBUF
1310 spin_lock_irqsave(&bcm_static_skb
->osl_pkt_lock
, flags
);
1312 for (i
= 0; i
< STATIC_PKT_2PAGE_NUM
; i
++) {
1313 if (p
== bcm_static_skb
->skb_8k
[i
]) {
1314 if (bcm_static_skb
->pkt_use
[i
] == 0) {
1315 printk("%s: static pkt idx %d(%p) is double free\n",
1316 __FUNCTION__
, i
, p
);
1318 bcm_static_skb
->pkt_use
[i
] = 0;
1321 if (skb
->mac_len
!= PREALLOC_USED_MAGIC
) {
1322 printk("%s: static pkt idx %d(%p) is not in used\n",
1323 __FUNCTION__
, i
, p
);
1326 skb
->mac_len
= PREALLOC_FREE_MAGIC
;
1327 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
1332 spin_unlock_irqrestore(&bcm_static_skb
->osl_pkt_lock
, flags
);
1333 printk("%s: packet %p does not exist in the pool\n", __FUNCTION__
, p
);
1335 down(&bcm_static_skb
->osl_pkt_sem
);
1336 for (i
= 0; i
< STATIC_PKT_1PAGE_NUM
; i
++) {
1337 if (p
== bcm_static_skb
->skb_4k
[i
]) {
1338 bcm_static_skb
->pkt_use
[i
] = 0;
1339 up(&bcm_static_skb
->osl_pkt_sem
);
1344 for (i
= STATIC_PKT_1PAGE_NUM
; i
< STATIC_PKT_1_2PAGE_NUM
; i
++) {
1345 if (p
== bcm_static_skb
->skb_8k
[i
- STATIC_PKT_1PAGE_NUM
]) {
1346 bcm_static_skb
->pkt_use
[i
] = 0;
1347 up(&bcm_static_skb
->osl_pkt_sem
);
1351 #ifdef ENHANCED_STATIC_BUF
1352 if (p
== bcm_static_skb
->skb_16k
) {
1353 bcm_static_skb
->pkt_use
[STATIC_PKT_MAX_NUM
- 1] = 0;
1354 up(&bcm_static_skb
->osl_pkt_sem
);
1358 up(&bcm_static_skb
->osl_pkt_sem
);
1359 #endif /* DHD_USE_STATIC_CTRLBUF */
1360 osl_pktfree(osh
, p
, send
);
1362 #endif /* CONFIG_DHD_USE_STATIC_BUF */
osl_pci_read_config(osl_t *osh, uint offset, uint size)
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
		pci_read_config_dword(osh->pdev, offset, &val);
		if (val != 0xffffffff)

osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
		pci_write_config_dword(osh->pdev, offset, val);
		if (offset != PCI_BAR0_WIN)
		if (osl_pci_read_config(osh, offset, size) == val)

/* return bus # for the pci device pointed by osh->pdev */
osl_pci_bus(osl_t *osh)
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif

/* return slot # for the pci device pointed by osh->pdev */
osl_pci_slot(osl_t *osh)
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif

/* return domain # for the pci device pointed by osh->pdev */
osl_pcie_domain(osl_t *osh)
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);

/* return bus # for the pci device pointed by osh->pdev */
osl_pcie_bus(osl_t *osh)
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;

/* return the pci device pointed by osh->pdev */
osl_pci_device(osl_t *osh)
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)

osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);

osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
osl_malloc(osl_t *osh, uint size)
	/* only ASSERT if osh is defined */
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
		unsigned long irq_flags;

		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
				if (bcm_static_buf->buf_use[i] == 0)

			if (i == STATIC_BUF_MAX_NUM)
				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
				printk("all static bufs in use!\n");

			bcm_static_buf->buf_use[i] = 1;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);

			atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

osl_mallocz(osl_t *osh, uint size)
	ptr = osl_malloc(osh, size);

osl_mfree(osl_t *osh, void *addr, uint size)
#ifdef CONFIG_DHD_USE_STATIC_BUF
	unsigned long flags;

	if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
		<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

		spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
		bcm_static_buf->buf_use[buf_idx] = 0;
		spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

		if (osh && osh->cmn) {
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			atomic_sub(size, &osh->cmn->malloced);
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
osl_vmalloc(osl_t *osh, uint size)
	/* only ASSERT if osh is defined */
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
	if ((addr = vmalloc(size)) == NULL) {
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

osl_vmallocz(osl_t *osh, uint size)
	ptr = osl_vmalloc(osh, size);

osl_vmfree(osl_t *osh, void *addr, uint size)
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);

osl_check_memleak(osl_t *osh)
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->malloced));

osl_malloced(osl_t *osh)
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));

osl_malloc_failed(osl_t *osh)
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);

osl_dma_consistent_align(void)
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))

#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	struct pci_dev *hwdev = osh->pdev;

#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
	va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
	PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
	PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
	*pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */

osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
#endif /* BCMDMA64OSL */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_free_consistent(osh->pdev, size, va, paddr);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
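/*
 * Illustrative sketch (not in the original source): typical use of the
 * consistent-DMA helpers above for a descriptor ring. The sizes and the
 * helper name are hypothetical; alignment is expressed in bits as the API
 * expects.
 */
#if 0
static void example_ring_alloc(osl_t *osh)
{
	uint alloced = 0;
	dmaaddr_t pa;
	void *ring;

	/* 8 KB ring, 2^12 = 4 KB alignment requested via align_bits */
	ring = osl_dma_alloc_consistent(osh, 8192, 12, &alloced, &pa);
	if (ring != NULL) {
		/* ... program the device with the low/high halves of pa ... */
		osl_dma_free_consistent(osh, ring, alloced, pa);
	}
}
#endif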
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
	dma_addr_t map_addr;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	map_addr = pci_map_single(osh->pdev, va, size, dir);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	ret = pci_dma_mapping_error(osh->pdev, map_addr);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
	ret = pci_dma_mapping_error(map_addr);
#endif
		printk("%s: Failed to map memory\n", __FUNCTION__);
		PHYSADDRLOSET(ret_addr, 0);
		PHYSADDRHISET(ret_addr, 0);

		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);

osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_unmap_single(osh->pdev, paddr, size, dir);
#else
	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* BCMDMA64OSL */
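/*
 * Illustrative sketch (not in the original source): streaming-DMA use of the
 * map/unmap pair above for a transmit buffer. The buffer names are
 * hypothetical; the returned dmaaddr_t is what gets programmed into a
 * descriptor.
 */
#if 0
static void example_streaming_map(osl_t *osh, void *txbuf, uint txlen)
{
	dmaaddr_t pa;

	pa = osl_dma_map(osh, txbuf, txlen, DMA_TX, NULL, NULL);
	/* ... post pa to the hardware and wait for the DMA to complete ... */
	osl_dma_unmap(osh, pa, txlen, DMA_TX);
}
#endif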
/* OSL function for CPU relax */
inline void BCMFASTPATH

extern void osl_preempt_disable(osl_t *osh)

extern void osl_preempt_enable(osl_t *osh)

#if defined(BCMASSERT_LOG)
osl_assert(const char *exp, const char *file, int line)
	const char *basename;

	basename = strrchr(file, '/');

#ifdef BCMASSERT_LOG
	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
#endif /* BCMASSERT_LOG */

	switch (g_assert_type) {
		panic("%s", tempbuf);
		printk("%s", tempbuf);
		printk("%s", tempbuf);

osl_delay(uint usec)
	d = MIN(usec, 1000);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	usleep_range(ms*1000, ms*1000 + 1000);

osl_sysuptime_us(void)
	do_gettimeofday(&tv);
	/* tv_usec content is fraction of a second */
	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
/*
 * The pkttag contents are NOT cloned.
 */
#ifdef BCM_OBJECT_TRACE
osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
#else
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCM_OBJECT_TRACE */
	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 */
	PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif

	if (PKTISFAST(osh, skb)) {
		/* if the buffer allocated from ctfpool is cloned then
		 * we can't be sure when it will be freed. since there
		 * is a chance that we will be losing a buffer
		 * from our pool, we increment the refill count for the
		 * object to be alloced later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		PKTCLRFAST(osh, skb);
#endif /* CTFPOOL */

	/* Clear PKTC context */
	PKTSETCLINK(p, NULL);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone copies skb->cb; we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
	bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */

/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access.
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

osl_pktalloced(osl_t *osh)
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->pktalloced));

	get_random_bytes(&rand, sizeof(rand));

/* Linux Kernel: File Operations: start */
osl_os_open_image(char *filename)
	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 */

osl_os_get_image_block(char *buf, int len, void *image)
	struct file *fp = (struct file *)image;

	rdlen = kernel_read(fp, fp->f_pos, buf, len);

osl_os_close_image(void *image)
	filp_close((struct file *)image, NULL);

osl_os_image_size(void *image)
	int len = 0, curroffset;

	/* store the current offset */
	curroffset = generic_file_llseek(image, 0, 1);
	/* goto end of file to get length */
	len = generic_file_llseek(image, 0, 2);
	/* restore back the offset */
	generic_file_llseek(image, curroffset, 0);

/* Linux Kernel: File Operations: end */
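/*
 * Illustrative sketch (not in the original source): reading a firmware image
 * with the file-operation wrappers above. The path and chunk size are
 * hypothetical.
 */
#if 0
static void example_read_image(char *buf, int bufsize)
{
	void *image = osl_os_open_image("/lib/firmware/example_fw.bin");
	int rdlen;

	if (image == NULL)
		return;
	while ((rdlen = osl_os_get_image_block(buf, bufsize, image)) > 0) {
		/* ... consume rdlen bytes from buf ... */
	}
	osl_os_close_image(image);
}
#endif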
#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
	unsigned long flags = 0;
	int acp_war_enab = ACP_WAR_ENAB();

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)

	if (pci_access && acp_war_enab)
		spin_lock_irqsave(&l2x0_reg_lock, flags);

		*(uint8 *)v = readb((volatile uint8 *)(addr));
	case sizeof(uint16):
		*(uint16 *)v = readw((volatile uint16 *)(addr));
	case sizeof(uint32):
		*(uint32 *)v = readl((volatile uint32 *)(addr));
	case sizeof(uint64):
		*(uint64 *)v = *((volatile uint64 *)(addr));

	if (pci_access && acp_war_enab)
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
2057 #ifdef BCM_SECURE_DMA
2059 osl_sec_dma_ioremap(osl_t
*osh
, struct page
*page
, size_t size
, bool iscache
, bool isdecr
)
2066 size
= PAGE_ALIGN(size
);
2067 order
= get_order(size
);
2069 map
= kmalloc(sizeof(struct page
*) << order
, GFP_ATOMIC
);
2074 for (i
= 0; i
< (size
>> PAGE_SHIFT
); i
++)
2078 addr
= vmap(map
, size
>> PAGE_SHIFT
, VM_MAP
, __pgprot(PAGE_KERNEL
));
2080 osh
->contig_delta_va_pa
= ((uint8
*)addr
- page_to_phys(page
));
2084 #if defined(__ARM_ARCH_7A__)
2085 addr
= vmap(map
, size
>> PAGE_SHIFT
, VM_MAP
,
2086 pgprot_noncached(__pgprot(PAGE_KERNEL
)));
2089 osh
->contig_delta_va_pa
= ((uint8
*)addr
- page_to_phys(page
));
2094 return (void *)addr
;
2098 osl_sec_dma_iounmap(osl_t
*osh
, void *contig_base_va
, size_t size
)
2100 vunmap(contig_base_va
);
2104 osl_sec_dma_init_elem_mem_block(osl_t
*osh
, size_t mbsize
, int max
, sec_mem_elem_t
**list
)
2108 sec_mem_elem_t
*sec_mem_elem
;
2110 if ((sec_mem_elem
= kmalloc(sizeof(sec_mem_elem_t
)*(max
), GFP_ATOMIC
)) != NULL
) {
2112 *list
= sec_mem_elem
;
2113 bzero(sec_mem_elem
, sizeof(sec_mem_elem_t
)*(max
));
2114 for (i
= 0; i
< max
-1; i
++) {
2115 sec_mem_elem
->next
= (sec_mem_elem
+ 1);
2116 sec_mem_elem
->size
= mbsize
;
2117 sec_mem_elem
->pa_cma
= osh
->contig_base_alloc
;
2118 sec_mem_elem
->vac
= osh
->contig_base_alloc_va
;
2120 sec_mem_elem
->pa_cma_page
= phys_to_page(sec_mem_elem
->pa_cma
);
2121 osh
->contig_base_alloc
+= mbsize
;
2122 osh
->contig_base_alloc_va
= ((uint8
*)osh
->contig_base_alloc_va
+ mbsize
);
2124 sec_mem_elem
= sec_mem_elem
+ 1;
2126 sec_mem_elem
->next
= NULL
;
2127 sec_mem_elem
->size
= mbsize
;
2128 sec_mem_elem
->pa_cma
= osh
->contig_base_alloc
;
2129 sec_mem_elem
->vac
= osh
->contig_base_alloc_va
;
2131 sec_mem_elem
->pa_cma_page
= phys_to_page(sec_mem_elem
->pa_cma
);
2132 osh
->contig_base_alloc
+= mbsize
;
2133 osh
->contig_base_alloc_va
= ((uint8
*)osh
->contig_base_alloc_va
+ mbsize
);
2136 printf("%s sec mem elem kmalloc failed\n", __FUNCTION__
);
2144 osl_sec_dma_deinit_elem_mem_block(osl_t
*osh
, size_t mbsize
, int max
, void *sec_list_base
)
2147 kfree(sec_list_base
);
2150 static sec_mem_elem_t
* BCMFASTPATH
2151 osl_sec_dma_alloc_mem_elem(osl_t
*osh
, void *va
, uint size
, int direction
,
2152 struct sec_cma_info
*ptr_cma_info
, uint offset
)
2154 sec_mem_elem_t
*sec_mem_elem
= NULL
;
2157 if (size
<= 512 && osh
->sec_list_512
) {
2158 sec_mem_elem
= osh
->sec_list_512
;
2159 osh
->sec_list_512
= sec_mem_elem
->next
;
2161 else if (size
<= 2048 && osh
->sec_list_2048
) {
2162 sec_mem_elem
= osh
->sec_list_2048
;
2163 osh
->sec_list_2048
= sec_mem_elem
->next
;
2167 ASSERT(osh
->sec_list_4096
);
2168 sec_mem_elem
= osh
->sec_list_4096
;
2169 osh
->sec_list_4096
= sec_mem_elem
->next
;
2170 #endif /* NOT_YET */
2172 sec_mem_elem
->next
= NULL
;
2174 if (ptr_cma_info
->sec_alloc_list_tail
) {
2175 ptr_cma_info
->sec_alloc_list_tail
->next
= sec_mem_elem
;
2176 ptr_cma_info
->sec_alloc_list_tail
= sec_mem_elem
;
2179 /* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
2180 ASSERT(ptr_cma_info
->sec_alloc_list
== NULL
);
2181 ptr_cma_info
->sec_alloc_list
= sec_mem_elem
;
2182 ptr_cma_info
->sec_alloc_list_tail
= sec_mem_elem
;
2184 return sec_mem_elem
;
2187 static void BCMFASTPATH
2188 osl_sec_dma_free_mem_elem(osl_t
*osh
, sec_mem_elem_t
*sec_mem_elem
)
2190 sec_mem_elem
->dma_handle
= 0x0;
2191 sec_mem_elem
->va
= NULL
;
2193 if (sec_mem_elem
->size
== 512) {
2194 sec_mem_elem
->next
= osh
->sec_list_512
;
2195 osh
->sec_list_512
= sec_mem_elem
;
2196 } else if (sec_mem_elem
->size
== 2048) {
2197 sec_mem_elem
->next
= osh
->sec_list_2048
;
2198 osh
->sec_list_2048
= sec_mem_elem
;
2199 } else if (sec_mem_elem
->size
== 4096) {
2200 #endif /* NOT_YET */
2201 sec_mem_elem
->next
= osh
->sec_list_4096
;
2202 osh
->sec_list_4096
= sec_mem_elem
;
2206 printf("%s free failed size=%d\n", __FUNCTION__
, sec_mem_elem
->size
);
2207 #endif /* NOT_YET */
2210 static sec_mem_elem_t
* BCMFASTPATH
2211 osl_sec_dma_find_rem_elem(osl_t
*osh
, struct sec_cma_info
*ptr_cma_info
, dma_addr_t dma_handle
)
2213 sec_mem_elem_t
*sec_mem_elem
= ptr_cma_info
->sec_alloc_list
;
2214 sec_mem_elem_t
*sec_prv_elem
= ptr_cma_info
->sec_alloc_list
;
2216 if (sec_mem_elem
->dma_handle
== dma_handle
) {
2218 ptr_cma_info
->sec_alloc_list
= sec_mem_elem
->next
;
2220 if (sec_mem_elem
== ptr_cma_info
->sec_alloc_list_tail
) {
2221 ptr_cma_info
->sec_alloc_list_tail
= NULL
;
2222 ASSERT(ptr_cma_info
->sec_alloc_list
== NULL
);
2225 return sec_mem_elem
;
2227 sec_mem_elem
= sec_mem_elem
->next
;
2229 while (sec_mem_elem
!= NULL
) {
2231 if (sec_mem_elem
->dma_handle
== dma_handle
) {
2233 sec_prv_elem
->next
= sec_mem_elem
->next
;
2234 if (sec_mem_elem
== ptr_cma_info
->sec_alloc_list_tail
)
2235 ptr_cma_info
->sec_alloc_list_tail
= sec_prv_elem
;
2237 return sec_mem_elem
;
2239 sec_prv_elem
= sec_mem_elem
;
2240 sec_mem_elem
= sec_mem_elem
->next
;
2245 static sec_mem_elem_t
*
2246 osl_sec_dma_rem_first_elem(osl_t
*osh
, struct sec_cma_info
*ptr_cma_info
)
2248 sec_mem_elem_t
*sec_mem_elem
= ptr_cma_info
->sec_alloc_list
;
2252 ptr_cma_info
->sec_alloc_list
= sec_mem_elem
->next
;
2254 if (ptr_cma_info
->sec_alloc_list
== NULL
)
2255 ptr_cma_info
->sec_alloc_list_tail
= NULL
;
2257 return sec_mem_elem
;
2263 static void * BCMFASTPATH
2264 osl_sec_dma_last_elem(osl_t
*osh
, struct sec_cma_info
*ptr_cma_info
)
2266 return ptr_cma_info
->sec_alloc_list_tail
;
2269 dma_addr_t BCMFASTPATH
2270 osl_sec_dma_map_txmeta(osl_t
*osh
, void *va
, uint size
, int direction
, void *p
,
2271 hnddma_seg_map_t
*dmah
, void *ptr_cma_info
)
2273 sec_mem_elem_t
*sec_mem_elem
;
2274 struct page
*pa_cma_page
;
2276 void *vaorig
= ((uint8
*)va
+ size
);
2277 dma_addr_t dma_handle
= 0x0;
2278 /* packet will be the one added with osl_sec_dma_map() just before this call */
2280 sec_mem_elem
= osl_sec_dma_last_elem(osh
, ptr_cma_info
);
2282 if (sec_mem_elem
&& sec_mem_elem
->va
== vaorig
) {
2284 pa_cma_page
= phys_to_page(sec_mem_elem
->pa_cma
);
2285 loffset
= sec_mem_elem
->pa_cma
-(sec_mem_elem
->pa_cma
& ~(PAGE_SIZE
-1));
2287 dma_handle
= dma_map_page(OSH_NULL
, pa_cma_page
, loffset
, size
,
2288 (direction
== DMA_TX
? DMA_TO_DEVICE
:DMA_FROM_DEVICE
));
2291 printf("%s: error orig va not found va = 0x%p \n",
2292 __FUNCTION__
, vaorig
);
2297 dma_addr_t BCMFASTPATH
2298 osl_sec_dma_map(osl_t
*osh
, void *va
, uint size
, int direction
, void *p
,
2299 hnddma_seg_map_t
*dmah
, void *ptr_cma_info
, uint offset
)
2302 sec_mem_elem_t
*sec_mem_elem
;
2303 struct page
*pa_cma_page
;
2304 void *pa_cma_kmap_va
= NULL
;
2306 dma_addr_t dma_handle
= 0x0;
2310 struct sk_buff
*skb
;
2312 #endif /* NOT_YET */
2314 ASSERT((direction
== DMA_RX
) || (direction
== DMA_TX
));
2315 sec_mem_elem
= osl_sec_dma_alloc_mem_elem(osh
, va
, size
, direction
, ptr_cma_info
, offset
);
2317 sec_mem_elem
->va
= va
;
2318 sec_mem_elem
->direction
= direction
;
2319 pa_cma_page
= sec_mem_elem
->pa_cma_page
;
2321 loffset
= sec_mem_elem
->pa_cma
-(sec_mem_elem
->pa_cma
& ~(PAGE_SIZE
-1));
2322 /* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
2323 * pa_cma_kmap_va += loffset;
2326 pa_cma_kmap_va
= sec_mem_elem
->vac
;
2327 pa_cma_kmap_va
= ((uint8
*)pa_cma_kmap_va
+ offset
);
2330 if (direction
== DMA_TX
) {
2331 memcpy((uint8
*)pa_cma_kmap_va
+offset
, va
, size
);
2336 memcpy(pa_cma_kmap_va
, va
, size
);
2337 /* prhex("Txpkt",pa_cma_kmap_va, size); */
2339 for (skb
= (struct sk_buff
*)p
; skb
!= NULL
; skb
= PKTNEXT(osh
, skb
)) {
2340 if (skb_is_nonlinear(skb
)) {
2343 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2344 skb_frag_t
*f
= &skb_shinfo(skb
)->frags
[i
];
2345 fragva
= kmap_atomic(skb_frag_page(f
));
2346 pa_cma_kmap_va
= ((uint8
*)pa_cma_kmap_va
+ buflen
);
2347 memcpy((pa_cma_kmap_va
),
2348 (fragva
+ f
->page_offset
), skb_frag_size(f
));
2349 kunmap_atomic(fragva
);
2350 buflen
+= skb_frag_size(f
);
2354 pa_cma_kmap_va
= ((uint8
*)pa_cma_kmap_va
+ buflen
);
2355 memcpy(pa_cma_kmap_va
, skb
->data
, skb
->len
);
2361 #endif /* NOT_YET */
2364 dmah
->origsize
= buflen
;
2369 if ((p
!= NULL
) && (dmah
!= NULL
)) {
2371 dmah
->origsize
= buflen
;
2373 *(uint32
*)(pa_cma_kmap_va
) = 0x0;
2376 if (direction
== DMA_RX
) {
2377 flush_kernel_vmap_range(pa_cma_kmap_va
, sizeof(int));
2379 dma_handle
= dma_map_page(OSH_NULL
, pa_cma_page
, loffset
+offset
, buflen
,
2380 (direction
== DMA_TX
? DMA_TO_DEVICE
:DMA_FROM_DEVICE
));
2382 dmah
->segs
[0].addr
= dma_handle
;
2383 dmah
->segs
[0].length
= buflen
;
2385 sec_mem_elem
->dma_handle
= dma_handle
;
2386 /* kunmap_atomic(pa_cma_kmap_va-loffset); */
2390 dma_addr_t BCMFASTPATH
2391 osl_sec_dma_dd_map(osl_t
*osh
, void *va
, uint size
, int direction
, void *p
, hnddma_seg_map_t
*map
)
2394 struct page
*pa_cma_page
;
2396 dma_addr_t dma_handle
= 0x0;
2399 pa_cma
= ((uint8
*)va
- (uint8
*)osh
->contig_delta_va_pa
);
2400 pa_cma_page
= phys_to_page(pa_cma
);
2401 loffset
= pa_cma
-(pa_cma
& ~(PAGE_SIZE
-1));
2403 dma_handle
= dma_map_page(OSH_NULL
, pa_cma_page
, loffset
, size
,
2404 (direction
== DMA_TX
? DMA_TO_DEVICE
:DMA_FROM_DEVICE
));
2410 osl_sec_dma_unmap(osl_t
*osh
, dma_addr_t dma_handle
, uint size
, int direction
,
2411 void *p
, hnddma_seg_map_t
*map
, void *ptr_cma_info
, uint offset
)
2413 sec_mem_elem_t
*sec_mem_elem
;
2415 struct page
*pa_cma_page
;
2417 void *pa_cma_kmap_va
= NULL
;
2422 BCM_REFERENCE(buflen
);
2423 BCM_REFERENCE(read_count
);
2425 sec_mem_elem
= osl_sec_dma_find_rem_elem(osh
, ptr_cma_info
, dma_handle
);
2426 ASSERT(sec_mem_elem
);
2428 va
= sec_mem_elem
->va
;
2429 va
= (uint8
*)va
- offset
;
2430 pa_cma
= sec_mem_elem
->pa_cma
;
2433 pa_cma_page
= sec_mem_elem
->pa_cma_page
;
2436 if (direction
== DMA_RX
) {
2440 /* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
2441 * pa_cma_kmap_va += loffset;
2444 pa_cma_kmap_va
= sec_mem_elem
->vac
;
2447 invalidate_kernel_vmap_range(pa_cma_kmap_va
, sizeof(int));
2449 buflen
= *(uint
*)(pa_cma_kmap_va
);
2455 } while (read_count
< 200);
2456 dma_unmap_page(OSH_NULL
, pa_cma
, size
, DMA_FROM_DEVICE
);
2457 memcpy(va
, pa_cma_kmap_va
, size
);
2458 /* kunmap_atomic(pa_cma_kmap_va); */
2463 for (skb
= (struct sk_buff
*)p
; (buflen
< size
) &&
2464 (skb
!= NULL
); skb
= skb
->next
) {
2465 if (skb_is_nonlinear(skb
)) {
2466 pa_cma_kmap_va
= kmap_atomic(pa_cma_page
);
2467 for (i
= 0; (buflen
< size
) &&
2468 (i
< skb_shinfo(skb
)->nr_frags
); i
++) {
2469 skb_frag_t
*f
= &skb_shinfo(skb
)->frags
[i
];
2470 cpuaddr
= kmap_atomic(skb_frag_page(f
));
2471 pa_cma_kmap_va
= ((uint8
*)pa_cma_kmap_va
+ buflen
);
2472 memcpy((cpuaddr
+ f
->page_offset
),
2473 pa_cma_kmap_va
, skb_frag_size(f
));
2474 kunmap_atomic(cpuaddr
);
2475 buflen
+= skb_frag_size(f
);
2477 kunmap_atomic(pa_cma_kmap_va
);
2479 pa_cma_kmap_va
= kmap_atomic(pa_cma_page
);
2480 pa_cma_kmap_va
= ((uint8
*)pa_cma_kmap_va
+ buflen
);
2481 memcpy(skb
->data
, pa_cma_kmap_va
, skb
->len
);
2482 kunmap_atomic(pa_cma_kmap_va
);
2489 #endif /* NOT YET */
2491 dma_unmap_page(OSH_NULL
, pa_cma
, size
+offset
, DMA_TO_DEVICE
);
2494 osl_sec_dma_free_mem_elem(osh
, sec_mem_elem
);
2498 osl_sec_dma_unmap_all(osl_t
*osh
, void *ptr_cma_info
)
2501 sec_mem_elem_t
*sec_mem_elem
;
2503 sec_mem_elem
= osl_sec_dma_rem_first_elem(osh
, ptr_cma_info
);
2505 while (sec_mem_elem
!= NULL
) {
2507 dma_unmap_page(OSH_NULL
, sec_mem_elem
->pa_cma
, sec_mem_elem
->size
,
2508 sec_mem_elem
->direction
== DMA_TX
? DMA_TO_DEVICE
: DMA_FROM_DEVICE
);
2509 osl_sec_dma_free_mem_elem(osh
, sec_mem_elem
);
2511 sec_mem_elem
= osl_sec_dma_rem_first_elem(osh
, ptr_cma_info
);
2516 osl_sec_dma_init_consistent(osl_t
*osh
)
2519 void *temp_va
= osh
->contig_base_alloc_coherent_va
;
2520 phys_addr_t temp_pa
= osh
->contig_base_alloc_coherent
;
2522 for (i
= 0; i
< SEC_CMA_COHERENT_MAX
; i
++) {
2523 osh
->sec_cma_coherent
[i
].avail
= TRUE
;
2524 osh
->sec_cma_coherent
[i
].va
= temp_va
;
2525 osh
->sec_cma_coherent
[i
].pa
= temp_pa
;
2526 temp_va
= ((uint8
*)temp_va
)+SEC_CMA_COHERENT_BLK
;
2527 temp_pa
+= SEC_CMA_COHERENT_BLK
;
2532 osl_sec_dma_alloc_consistent(osl_t
*osh
, uint size
, uint16 align_bits
, ulong
*pap
)
2535 void *temp_va
= NULL
;
2539 if (size
> SEC_CMA_COHERENT_BLK
) {
2540 printf("%s unsupported size\n", __FUNCTION__
);
2544 for (i
= 0; i
< SEC_CMA_COHERENT_MAX
; i
++) {
2545 if (osh
->sec_cma_coherent
[i
].avail
== TRUE
) {
2546 temp_va
= osh
->sec_cma_coherent
[i
].va
;
2547 temp_pa
= osh
->sec_cma_coherent
[i
].pa
;
2548 osh
->sec_cma_coherent
[i
].avail
= FALSE
;
2553 if (i
== SEC_CMA_COHERENT_MAX
)
2554 printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__
,
2555 temp_va
, (ulong
)temp_pa
, size
);
2557 *pap
= (unsigned long)temp_pa
;
2562 osl_sec_dma_free_consistent(osl_t
*osh
, void *va
, uint size
, dmaaddr_t pa
)
2566 for (i
= 0; i
< SEC_CMA_COHERENT_MAX
; i
++) {
2567 if (osh
->sec_cma_coherent
[i
].va
== va
) {
2568 osh
->sec_cma_coherent
[i
].avail
= TRUE
;
2572 if (i
== SEC_CMA_COHERENT_MAX
)
2573 printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__
,
2574 va
, (ulong
)pa
, size
);
2577 #endif /* BCM_SECURE_DMA */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
#include <linux/kallsyms.h>
#include <net/sock.h>
osl_pkt_orphan_partial(struct sk_buff *skb)
	static void *p_tcp_wfree = NULL;

	if (!skb->destructor || skb->destructor == sock_wfree)

	if (unlikely(!p_tcp_wfree)) {
		char sym[KSYM_SYMBOL_LEN];
		sprint_symbol(sym, (unsigned long)skb->destructor);
		if (!strcmp(sym, "tcp_wfree"))
			p_tcp_wfree = skb->destructor;

	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))

	/* abstract a certain portion of skb truesize from the socket
	 * sk_wmem_alloc to allow more skbs to be allocated for this
	 * socket, providing a better cushion to meet WiFi device requirements.
	 */
	fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
	skb->truesize -= fraction;
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
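/*
 * Illustrative arithmetic (not in the original source): with a hypothetical
 * TSQ_MULTIPLIER of 8 and an skb->truesize of 4096 bytes, the code above
 * subtracts fraction = 4096 * 7 / 8 = 3584 bytes from the socket's
 * sk_wmem_alloc, so TCP small-queue accounting only "sees" 512 bytes per
 * queued skb and keeps feeding the WiFi driver.
 */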
/* Note: all timer APIs are not thread-safe and should be protected with locks by the caller */
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
		printk(KERN_ERR "osl_timer_init: malloc failed for osl_timer_t\n");

	bzero(t, sizeof(osl_timer_t));
	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
		printk(KERN_ERR "osl_timer_init: malloc failed\n");
		MFREE(NULL, t, sizeof(osl_timer_t));

	t->timer->data = (ulong)arg;
	t->timer->function = (linux_timer_fn)fn;

	init_timer(t->timer);

osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		printf("Periodic timers are not supported by Linux timer APIs\n");
	t->timer->expires = jiffies + ms*HZ/1000;

	add_timer(t->timer);

osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		printf("Periodic timers are not supported by Linux timer APIs\n");
	t->timer->expires = jiffies + ms*HZ/1000;

	mod_timer(t->timer, t->timer->expires);

/*
 * Return TRUE if timer successfully deleted, FALSE if still pending
 */
osl_timer_del(osl_t *osh, osl_timer_t *t)
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		del_timer(t->timer);
		MFREE(NULL, t->timer, sizeof(struct timer_list));
		MFREE(NULL, t, sizeof(osl_timer_t));
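/*
 * Illustrative sketch (not in the original source): one-shot use of the OSL
 * timer wrappers above. The callback and helper names are hypothetical, and
 * the caller provides its own locking as noted in the comment above.
 */
#if 0
static void example_timeout_cb(void *arg)
{
	/* runs in timer (softirq) context */
}

static void example_timer_usage(osl_t *osh)
{
	osl_timer_t *t = osl_timer_init(osh, "example", example_timeout_cb, NULL);

	if (t != NULL) {
		osl_timer_add(osh, t, 100, FALSE);	/* fire once after ~100 ms */
		/* ... */
		osl_timer_del(osh, t);			/* also frees the timer */
	}
}
#endif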