adt3-S dhd_driver source code [1/1]
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd-usb.1.363.110.17.x / linux_osl.c
1 /*
2 * Linux OS Independent Layer
3 *
4 * Copyright (C) 1999-2016, Broadcom Corporation
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
27 * $Id: linux_osl.c 589291 2015-09-29 07:09:00Z $
28 */
29
30 #define LINUX_PORT
31
32 #include <typedefs.h>
33 #include <bcmendian.h>
34 #include <linuxver.h>
35 #include <bcmdefs.h>
36
37 #ifdef mips
38 #include <asm/paccess.h>
39 #include <asm/cache.h>
40 #include <asm/r4kcache.h>
41 #undef ABS
42 #endif /* mips */
43
44 #if !defined(STBLINUX)
45 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
46 #include <asm/cacheflush.h>
47 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
48 #endif /* STBLINUX */
49
50 #include <linux/random.h>
51
52 #include <osl.h>
53 #include <bcmutils.h>
54 #include <linux/delay.h>
55 #include <pcicfg.h>
56 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
57 #include <asm-generic/pci-dma-compat.h>
58 #endif
59
60
61 #ifdef BCM_SECURE_DMA
62 #include <linux/module.h>
63 #include <linux/kernel.h>
64 #include <linux/io.h>
65 #include <linux/printk.h>
66 #include <linux/errno.h>
67 #include <linux/mm.h>
68 #include <linux/moduleparam.h>
69 #include <asm/io.h>
70 #include <linux/skbuff.h>
71 #include <linux/vmalloc.h>
72 #include <stbutils.h>
73 #include <linux/highmem.h>
74 #include <linux/dma-mapping.h>
75 #include <asm/memory.h>
76 #if defined(__ARM_ARCH_7A__)
77 #include <arch/arm/include/asm/tlbflush.h>
78 #endif
79 #endif /* BCM_SECURE_DMA */
80
81 #include <linux/fs.h>
82
83 #if defined(STB)
84 #include <linux/spinlock.h>
85 extern spinlock_t l2x0_reg_lock;
86 #endif
87
88
89 #define PCI_CFG_RETRY 10
90
91 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
92 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
93 #define DUMPBUFSZ 1024
94
95 /* dependancy check */
96 #if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
97 #error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only"
98 #endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
99
100 #ifdef CONFIG_DHD_USE_STATIC_BUF
101 #ifdef DHD_USE_STATIC_CTRLBUF
102 #define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
103 #define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
104 #define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
105
106 #define PREALLOC_FREE_MAGIC 0xFEDC
107 #define PREALLOC_USED_MAGIC 0xFCDE
108 #else
109 #define DHD_SKB_HDRSIZE 336
110 #define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
111 #define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
112 #define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
113 #endif /* DHD_USE_STATIC_CTRLBUF */
114
115 #define STATIC_BUF_MAX_NUM 16
116 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
117 #define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
118
119 typedef struct bcm_static_buf {
120 spinlock_t static_lock;
121 unsigned char *buf_ptr;
122 unsigned char buf_use[STATIC_BUF_MAX_NUM];
123 } bcm_static_buf_t;
124
125 static bcm_static_buf_t *bcm_static_buf = 0;
126
127 #ifdef DHD_USE_STATIC_CTRLBUF
128 #define STATIC_PKT_4PAGE_NUM 0
129 #define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
130 #elif defined(ENHANCED_STATIC_BUF)
131 #define STATIC_PKT_4PAGE_NUM 1
132 #define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE
133 #else
134 #define STATIC_PKT_4PAGE_NUM 0
135 #define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
136 #endif /* DHD_USE_STATIC_CTRLBUF */
137
138 #ifdef DHD_USE_STATIC_CTRLBUF
139 #define STATIC_PKT_1PAGE_NUM 0
140 #define STATIC_PKT_2PAGE_NUM 64
141 #else
142 #define STATIC_PKT_1PAGE_NUM 8
143 #define STATIC_PKT_2PAGE_NUM 8
144 #endif /* DHD_USE_STATIC_CTRLBUF */
145
146 #define STATIC_PKT_1_2PAGE_NUM \
147 ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
148 #define STATIC_PKT_MAX_NUM \
149 ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
150
151 typedef struct bcm_static_pkt {
152 #ifdef DHD_USE_STATIC_CTRLBUF
153 struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
154 unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
155 spinlock_t osl_pkt_lock;
156 uint32 last_allocated_index;
157 #else
158 struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
159 struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
160 #ifdef ENHANCED_STATIC_BUF
161 struct sk_buff *skb_16k;
162 #endif /* ENHANCED_STATIC_BUF */
163 struct semaphore osl_pkt_sem;
164 #endif /* DHD_USE_STATIC_CTRLBUF */
165 unsigned char pkt_use[STATIC_PKT_MAX_NUM];
166 } bcm_static_pkt_t;
167
168 static bcm_static_pkt_t *bcm_static_skb = 0;
169
170 void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
171 #endif /* CONFIG_DHD_USE_STATIC_BUF */
172
173 typedef struct bcm_mem_link {
174 struct bcm_mem_link *prev;
175 struct bcm_mem_link *next;
176 uint size;
177 int line;
178 void *osh;
179 char file[BCM_MEM_FILENAME_LEN];
180 } bcm_mem_link_t;
181
182 struct osl_cmn_info {
183 atomic_t malloced;
184 atomic_t pktalloced; /* Number of allocated packet buffers */
185 spinlock_t dbgmem_lock;
186 bcm_mem_link_t *dbgmem_list;
187 spinlock_t pktalloc_lock;
188 atomic_t refcount; /* Number of references to this shared structure. */
189 };
190 typedef struct osl_cmn_info osl_cmn_t;
191
192 struct osl_info {
193 osl_pubinfo_t pub;
194 uint32 flags; /* If specific cases to be handled in the OSL */
195 uint magic;
196 void *pdev;
197 uint failed;
198 uint bustype;
199 osl_cmn_t *cmn; /* Common OSL related data shred between two OSH's */
200
201 void *bus_handle;
202 #ifdef BCM_SECURE_DMA
203 struct sec_mem_elem *sec_list_4096;
204 struct sec_mem_elem *sec_list_base_4096;
205 phys_addr_t contig_base;
206 void *contig_base_va;
207 phys_addr_t contig_base_alloc;
208 void *contig_base_alloc_va;
209 phys_addr_t contig_base_alloc_coherent;
210 void *contig_base_alloc_coherent_va;
211 void *contig_base_coherent_va;
212 phys_addr_t contig_delta_va_pa;
213 struct {
214 phys_addr_t pa;
215 void *va;
216 bool avail;
217 } sec_cma_coherent[SEC_CMA_COHERENT_MAX];
218 int stb_ext_params;
219 #endif /* BCM_SECURE_DMA */
220 };
221
222 #ifdef BCM_SECURE_DMA
223 static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
224 bool iscache, bool isdecr);
225 static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
226 static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
227 sec_mem_elem_t **list);
228 static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
229 void *sec_list_base);
230 static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
231 int direction, struct sec_cma_info *ptr_cma_info, uint offset);
232 static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
233 static void osl_sec_dma_init_consistent(osl_t *osh);
234 static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
235 ulong *pap);
236 static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
237 #endif /* BCM_SECURE_DMA */
238
/* Zero the 32-byte OSL packet tag carried in skb->cb[], eight uint32
 * stores at a time.  The ASSERT pins the tag size at compile-config
 * time; callers (osl_pkt_tonative/frmnative) invoke this before handing
 * packets across the driver/OS boundary.
 */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
248
249 /* PCMCIA attribute space access macros */
250
251 /* Global ASSERT type flag */
252 uint32 g_assert_type = 0;
253 module_param(g_assert_type, int, 0);
254
255 #ifdef BCM_SECURE_DMA
256 #define SECDMA_MODULE_PARAMS 0
257 #define SECDMA_EXT_FILE 1
258
259 unsigned long secdma_addr = 0;
260 unsigned long secdma_addr2 = 0;
261 u32 secdma_size = 0;
262 u32 secdma_size2 = 0;
263 module_param(secdma_addr, ulong, 0);
264 module_param(secdma_size, int, 0);
265 module_param(secdma_addr2, ulong, 0);
266 module_param(secdma_size2, int, 0);
267 static int secdma_found = 0;
268 #endif /* BCM_SECURE_DMA */
269
/* Map from BCME_* status codes (bcmutils.h) to Linux -E* errno values.
 * Indexed by -bcmerror in osl_error(); entry 0 is success.  The #if at
 * the bottom forces a compile error whenever BCME_LAST moves without a
 * matching entry being added here.
 */
static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */

/* When an new error code is added to bcmutils.h, add os
 * specific error translation here as well
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -53
#error "You need to add a OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif
};

/* Low-memory test knob: when non-zero, osl_pktget() randomly fails
 * roughly lmtest percent of allocations to exercise error paths. */
uint lmtest = FALSE;
336
337 /* translate bcmerrors into linux errors */
338 int
339 osl_error(int bcmerror)
340 {
341 if (bcmerror > 0)
342 bcmerror = 0;
343 else if (bcmerror < BCME_LAST)
344 bcmerror = BCME_ERROR;
345
346 /* Array bounds covered by ASSERT in osl_attach */
347 return linuxbcmerrormap[-bcmerror];
348 }
349
/* Allocate and initialise an OSL handle for a device on the given bus.
 * The osl_cmn_t bookkeeping block (alloc/packet counters, locks) is
 * created on first attach and shared (refcounted) when SHARED_OSL_CMN
 * passes an existing one in.  With BCM_SECURE_DMA, the secure CMA
 * regions are mapped and carved up here as well.
 * Returns the new handle, or NULL on allocation/setup failure.
 */
osl_t *
#ifdef SHARED_OSL_CMN
osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
#else
osl_attach(void *pdev, uint bustype, bool pkttag)
#endif /* SHARED_OSL_CMN */
{
#ifndef SHARED_OSL_CMN
	void **osl_cmn = NULL;
#endif /* SHARED_OSL_CMN */
	osl_t *osh;
	gfp_t flags;

#ifdef BCM_SECURE_DMA
	u32 secdma_memsize;
#endif

	/* Sleeping allocation only when the context allows it */
	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))
		return osh;

	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	/* Create the shared common block on first attach, otherwise adopt
	 * the caller-supplied one. */
	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
			kfree(osh);
			return NULL;
		}
		bzero(osh->cmn, sizeof(osl_cmn_t));
		if (osl_cmn)
			*osl_cmn = osh->cmn;
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));

	} else {
		osh->cmn = *osl_cmn;
	}
	atomic_add(1, &osh->cmn->refcount);

	bcm_object_trace_init();

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->failed = 0;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA

	/* Secure-DMA region: addresses/sizes come either from module
	 * parameters or from the stbpriv.txt parameter file; up to two
	 * instances (secdma_found) are supported. */
	if ((secdma_addr != 0) && (secdma_size != 0)) {
		printk("linux_osl.c: Buffer info passed via module params, using it.\n");

		if (secdma_found == 0) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
			secdma_memsize = secdma_size;
		} else if (secdma_found == 1) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
			secdma_memsize = secdma_size2;
		} else {
			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
			/* NOTE(review): these early-exit paths free osh but not
			 * osh->cmn (and the refcount already taken on it) —
			 * looks like a leak when this attach created cmn;
			 * verify against SHARED_OSL_CMN callers. */
			kfree(osh);
			return NULL;
		}

		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;

		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
	}
	else if (stbpriv_init(osh) == 0) {
		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");

		if (secdma_found == 0) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
		} else if (secdma_found == 1) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
		} else {
			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
			kfree(osh);
			return NULL;
		}

		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;

		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_EXT_FILE;
	}
	else {
		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
		kfree(osh);
		return NULL;
	}

	secdma_found++;
	/* Carve the region: first a coherent descriptor block ... */
	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	if (osh->contig_base_alloc_coherent_va == NULL) {
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	/* ... then a cached data block for packet buffers */
	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	if (osh->contig_base_alloc_va == NULL) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_va = osh->contig_base_alloc_va;

	/* Pre-split the data block into a free list of 4K elements */
	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

	/* Memory-mapped buses get direct register access (mmbus) */
	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}



	return osh;
}
521
/* Hook up the platform-preallocated static buffer and skb pools.
 * Returns 0 on success, -ENOMEM when the platform layer cannot supply
 * the reserved memory.  With adapter == NULL the pools are left unset
 * and allocation falls back to the normal kernel paths.
 */
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		/* Section 3: one STATIC_BUF_SIZE chunk for the pool header
		 * plus STATIC_BUF_TOTAL_LEN of actual slots right after it. */
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("can not alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		} else {
			printk("alloc static buf at %p!\n", bcm_static_buf);
		}

		spin_lock_init(&bcm_static_buf->static_lock);

		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
	}

#if defined(DHD_USE_STATIC_CTRLBUF)
	if (!bcm_static_skb && adapter) {
		int i;
		void *skb_buff_ptr = 0;
		/* skb-pool bookkeeping lives 2KB into the static buffer */
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		/* Section 4: array of preallocated sk_buff pointers */
		skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
		if (!skb_buff_ptr) {
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			bcm_static_skb->pkt_use[i] = 0;
		}

		/* NOTE(review): this inner #ifdef is redundant — the enclosing
		 * #if already requires DHD_USE_STATIC_CTRLBUF, so the
		 * sema_init branch below is unreachable here. */
#ifdef DHD_USE_STATIC_CTRLBUF
		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
		bcm_static_skb->last_allocated_index = 0;
#else
		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
	}
#endif
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	return 0;
}
573
/* Stash the bus-layer handle on the OSL so it can be retrieved later
 * via osl_get_bus_handle().  Ownership stays with the caller. */
void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}
578
/* Return the bus-layer handle previously set by osl_set_bus_handle();
 * NULL if none was set. */
void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}
583
/* Tear down an OSL handle created by osl_attach(): release the
 * secure-DMA regions (when BCM_SECURE_DMA), drop one reference on the
 * shared osl_cmn_t — freeing it when the last reference goes — then
 * free the handle itself.  NULL is tolerated.
 */
void
osl_detach(osl_t *osh)
{
	if (osh == NULL)
		return;

#ifdef BCM_SECURE_DMA
	if (osh->stb_ext_params == SECDMA_EXT_FILE)
		stbpriv_exit(osh);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);

	secdma_found--;
#endif /* BCM_SECURE_DMA */


	bcm_object_trace_deinit();

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	/* NOTE(review): sub-then-read is not atomic as a pair; concurrent
	 * detaches could race on the kfree decision — assumes detach is
	 * serialized by the caller (verify). */
	atomic_sub(1, &osh->cmn->refcount);
	if (atomic_read(&osh->cmn->refcount) == 0) {
			kfree(osh->cmn);
	}
	kfree(osh);
}
610
611 int osl_static_mem_deinit(osl_t *osh, void *adapter)
612 {
613 #ifdef CONFIG_DHD_USE_STATIC_BUF
614 if (bcm_static_buf) {
615 bcm_static_buf = 0;
616 }
617 #endif /* CONFIG_DHD_USE_STATIC_BUF */
618 return 0;
619 }
620
/* Central skb allocator for the OSL.  On >= 2.6.25 kernels it selects
 * GFP_ATOMIC when called from atomic/IRQ-disabled context, GFP_KERNEL
 * otherwise; older kernels fall back to dev_alloc_skb().
 * Returns NULL on allocation failure.
 */
static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
	/* NOTE(review): unconditionally OR-ing GFP_ATOMIC on top of a
	 * possibly GFP_KERNEL mask looks deliberate for these configs but
	 * mixes sleep/atomic semantics — verify against target kernels. */
	flags |= GFP_ATOMIC;
#endif
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
	return skb;
}
635
636
637
638 /* Convert a driver packet to native(OS) packet
639 * In the process, packettag is zeroed out before sending up
640 * IP code depends on skb->cb to be setup correctly with various options
641 * In our case, that means it should be 0
642 */
643 struct sk_buff * BCMFASTPATH
644 osl_pkt_tonative(osl_t *osh, void *pkt)
645 {
646 struct sk_buff *nskb;
647
648 if (osh->pub.pkttag)
649 OSL_PKTTAG_CLEAR(pkt);
650
651 /* Decrement the packet counter */
652 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
653 atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
654
655 }
656 return (struct sk_buff *)pkt;
657 }
658
659 /* Convert a native(OS) packet to driver packet.
660 * In the process, native packet is destroyed, there is no copying
661 * Also, a packettag is zeroed out
662 */
663 void * BCMFASTPATH
664 osl_pkt_frmnative(osl_t *osh, void *pkt)
665 {
666 struct sk_buff *nskb;
667
668 if (osh->pub.pkttag)
669 OSL_PKTTAG_CLEAR(pkt);
670
671 /* Increment the packet counter */
672 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
673 atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
674
675 }
676 return (void *)pkt;
677 }
678
679 /* Return a new packet. zero out pkttag */
680 void * BCMFASTPATH
681 osl_pktget(osl_t *osh, uint len)
682 {
683 struct sk_buff *skb;
684 uchar num = 0;
685 if (lmtest != FALSE) {
686 get_random_bytes(&num, sizeof(uchar));
687 if ((num + 1) <= (256 * lmtest / 100))
688 return NULL;
689 }
690
691 if ((skb = osl_alloc_skb(osh, len))) {
692 skb->tail += len;
693 skb->len += len;
694 skb->priority = 0;
695
696 atomic_inc(&osh->cmn->pktalloced);
697 }
698
699 return ((void*) skb);
700 }
701
702
/* Free a driver packet and every skb chained through skb->next.
 * When `send` is set, the tx completion hook (osh->pub.tx_fn) runs
 * first.  Packets tagged as belonging to the static pool (via
 * skb->mac_len magic values) are rejected with a stack dump — they
 * must go through osl_pktfree_static() instead.  Each freed segment
 * decrements the shared pktalloced counter.
 */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	/* Guard against static-pool packets leaking into the normal path */
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}

	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in used\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;



		/* (braces left over from build-variant code stripped by the
		 * preprocessor-heavy upstream source) */
		{
			dev_kfree_skb_any(skb);
		}
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}
749
750 #ifdef CONFIG_DHD_USE_STATIC_BUF
751 void*
752 osl_pktget_static(osl_t *osh, uint len)
753 {
754 int i = 0;
755 struct sk_buff *skb;
756 #ifdef DHD_USE_STATIC_CTRLBUF
757 unsigned long flags;
758 #endif /* DHD_USE_STATIC_CTRLBUF */
759
760 if (!bcm_static_skb)
761 return osl_pktget(osh, len);
762
763 if (len > DHD_SKB_MAX_BUFSIZE) {
764 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
765 return osl_pktget(osh, len);
766 }
767
768 #ifdef DHD_USE_STATIC_CTRLBUF
769 spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
770
771 if (len <= DHD_SKB_2PAGE_BUFSIZE) {
772 uint32 index;
773 for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
774 index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
775 bcm_static_skb->last_allocated_index++;
776 if (bcm_static_skb->skb_8k[index] &&
777 bcm_static_skb->pkt_use[index] == 0) {
778 break;
779 }
780 }
781
782 if ((i != STATIC_PKT_2PAGE_NUM) &&
783 (index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) {
784 bcm_static_skb->pkt_use[index] = 1;
785 skb = bcm_static_skb->skb_8k[index];
786 skb->data = skb->head;
787 #ifdef NET_SKBUFF_DATA_USES_OFFSET
788 skb_set_tail_pointer(skb, NET_SKB_PAD);
789 #else
790 skb->tail = skb->data + NET_SKB_PAD;
791 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
792 skb->data += NET_SKB_PAD;
793 skb->cloned = 0;
794 skb->priority = 0;
795 #ifdef NET_SKBUFF_DATA_USES_OFFSET
796 skb_set_tail_pointer(skb, len);
797 #else
798 skb->tail = skb->data + len;
799 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
800 skb->len = len;
801 skb->mac_len = PREALLOC_USED_MAGIC;
802 spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
803 return skb;
804 }
805 }
806
807 spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
808 printk("%s: all static pkt in use!\n", __FUNCTION__);
809 return NULL;
810 #else
811 down(&bcm_static_skb->osl_pkt_sem);
812
813 if (len <= DHD_SKB_1PAGE_BUFSIZE) {
814 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
815 if (bcm_static_skb->skb_4k[i] &&
816 bcm_static_skb->pkt_use[i] == 0) {
817 break;
818 }
819 }
820
821 if (i != STATIC_PKT_MAX_NUM) {
822 bcm_static_skb->pkt_use[i] = 1;
823
824 skb = bcm_static_skb->skb_4k[i];
825 #ifdef NET_SKBUFF_DATA_USES_OFFSET
826 skb_set_tail_pointer(skb, len);
827 #else
828 skb->tail = skb->data + len;
829 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
830 skb->len = len;
831
832 up(&bcm_static_skb->osl_pkt_sem);
833 return skb;
834 }
835 }
836
837 if (len <= DHD_SKB_2PAGE_BUFSIZE) {
838 for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
839 if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
840 bcm_static_skb->pkt_use[i] == 0) {
841 break;
842 }
843 }
844
845 if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
846 bcm_static_skb->pkt_use[i] = 1;
847 skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
848 #ifdef NET_SKBUFF_DATA_USES_OFFSET
849 skb_set_tail_pointer(skb, len);
850 #else
851 skb->tail = skb->data + len;
852 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
853 skb->len = len;
854
855 up(&bcm_static_skb->osl_pkt_sem);
856 return skb;
857 }
858 }
859
860 #if defined(ENHANCED_STATIC_BUF)
861 if (bcm_static_skb->skb_16k &&
862 bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
863 bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
864
865 skb = bcm_static_skb->skb_16k;
866 #ifdef NET_SKBUFF_DATA_USES_OFFSET
867 skb_set_tail_pointer(skb, len);
868 #else
869 skb->tail = skb->data + len;
870 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
871 skb->len = len;
872
873 up(&bcm_static_skb->osl_pkt_sem);
874 return skb;
875 }
876 #endif /* ENHANCED_STATIC_BUF */
877
878 up(&bcm_static_skb->osl_pkt_sem);
879 printk("%s: all static pkt in use!\n", __FUNCTION__);
880 return osl_pktget(osh, len);
881 #endif /* DHD_USE_STATIC_CTRLBUF */
882 }
883
/* Return a packet to the static pool; packets not belonging to the
 * pool (or when no pool exists) are routed to osl_pktfree().
 * CTRLBUF build: detects double-free / not-in-use via the pkt_use[]
 * flags and the skb->mac_len magic, under the pool spinlock.
 * Non-CTRLBUF build: clears the matching pkt_use[] slot under the
 * pool semaphore.
 */
void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb = (struct sk_buff *)p;
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!p) {
		return;
	}

	if (!bcm_static_skb) {
		osl_pktfree(osh, p, send);
		return;
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			/* Double-free detection: slot already marked free */
			if (bcm_static_skb->pkt_use[i] == 0) {
				printk("%s: static pkt idx %d(%p) is double free\n",
					__FUNCTION__, i, p);
			} else {
				bcm_static_skb->pkt_use[i] = 0;
			}

			if (skb->mac_len != PREALLOC_USED_MAGIC) {
				printk("%s: static pkt idx %d(%p) is not in used\n",
					__FUNCTION__, i, p);
			}

			skb->mac_len = PREALLOC_FREE_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
	down(&bcm_static_skb->osl_pkt_sem);
	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif
	/* Not a pool packet after all: free it normally */
	up(&bcm_static_skb->osl_pkt_sem);
	osl_pktfree(osh, p, send);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
955 #endif /* CONFIG_DHD_USE_STATIC_BUF */
956
957 uint32
958 osl_pci_read_config(osl_t *osh, uint offset, uint size)
959 {
960 uint val = 0;
961 uint retry = PCI_CFG_RETRY;
962
963 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
964
965 /* only 4byte access supported */
966 ASSERT(size == 4);
967
968 do {
969 pci_read_config_dword(osh->pdev, offset, &val);
970 if (val != 0xffffffff)
971 break;
972 } while (retry--);
973
974
975 return (val);
976 }
977
978 void
979 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
980 {
981 uint retry = PCI_CFG_RETRY;
982
983 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
984
985 /* only 4byte access supported */
986 ASSERT(size == 4);
987
988 do {
989 pci_write_config_dword(osh->pdev, offset, val);
990 if (offset != PCI_BAR0_WIN)
991 break;
992 if (osl_pci_read_config(osh, offset, size) == val)
993 break;
994 } while (retry--);
995
996 }
997
/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	/* NOTE(review): on newer ARM kernels this returns the PCI *domain*
	 * number rather than the bus number — presumably intentional for
	 * how this driver identifies devices; verify against callers. */
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif
}
1010
/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	/* NOTE(review): newer ARM kernels report the slot with a +1 offset
	 * here — assumed to match platform numbering; confirm with users
	 * of this value. */
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif
}
1023
/* return domain # for the pci device pointed by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}
1032
/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}
1041
/* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}
1050
/* PCMCIA attribute-space accessor: an intentional no-op stub in this
 * build; kept so the read/write wrappers below remain linkable. */
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}
1055
/* Read PCMCIA attribute space (no-op: delegates to the stub above). */
void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}
1061
/* Write PCMCIA attribute space (no-op: delegates to the stub above). */
void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}
1067
1068 void *
1069 osl_malloc(osl_t *osh, uint size)
1070 {
1071 void *addr;
1072 gfp_t flags;
1073
1074 /* only ASSERT if osh is defined */
1075 if (osh)
1076 ASSERT(osh->magic == OS_HANDLE_MAGIC);
1077 #ifdef CONFIG_DHD_USE_STATIC_BUF
1078 if (bcm_static_buf)
1079 {
1080 unsigned long irq_flags;
1081 int i = 0;
1082 if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
1083 {
1084 spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);
1085
1086 for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
1087 {
1088 if (bcm_static_buf->buf_use[i] == 0)
1089 break;
1090 }
1091
1092 if (i == STATIC_BUF_MAX_NUM)
1093 {
1094 spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
1095 printk("all static buff in use!\n");
1096 goto original;
1097 }
1098
1099 bcm_static_buf->buf_use[i] = 1;
1100 spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
1101
1102 bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
1103 if (osh)
1104 atomic_add(size, &osh->cmn->malloced);
1105
1106 return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
1107 }
1108 }
1109 original:
1110 #endif /* CONFIG_DHD_USE_STATIC_BUF */
1111
1112 flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
1113 if ((addr = kmalloc(size, flags)) == NULL) {
1114 if (osh)
1115 osh->failed++;
1116 return (NULL);
1117 }
1118 if (osh && osh->cmn)
1119 atomic_add(size, &osh->cmn->malloced);
1120
1121 return (addr);
1122 }
1123
1124 void *
1125 osl_mallocz(osl_t *osh, uint size)
1126 {
1127 void *ptr;
1128
1129 ptr = osl_malloc(osh, size);
1130
1131 if (ptr != NULL) {
1132 bzero(ptr, size);
1133 }
1134
1135 return ptr;
1136 }
1137
/* Free a buffer from osl_malloc()/osl_mallocz() and update per-osh
 * accounting.  Buffers inside the static pool are returned to the pool
 * instead of being kfree'd.
 */
void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	unsigned long flags;

	if (bcm_static_buf)
	{
		/* Address falls inside the static region: release the slot.
		 * NOTE(review): the range check is against the whole structure,
		 * but buf_idx is computed from buf_ptr; an addr between
		 * bcm_static_buf and buf_ptr would yield a negative index --
		 * presumably callers only pass pointers previously returned by
		 * osl_malloc(); verify against callers.
		 */
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
			bcm_static_buf->buf_use[buf_idx] = 0;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

			if (osh && osh->cmn) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->cmn->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	kfree(addr);
}
1174
1175 uint
1176 osl_check_memleak(osl_t *osh)
1177 {
1178 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1179 if (atomic_read(&osh->cmn->refcount) == 1)
1180 return (atomic_read(&osh->cmn->malloced));
1181 else
1182 return 0;
1183 }
1184
1185 uint
1186 osl_malloced(osl_t *osh)
1187 {
1188 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1189 return (atomic_read(&osh->cmn->malloced));
1190 }
1191
1192 uint
1193 osl_malloc_failed(osl_t *osh)
1194 {
1195 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1196 return (osh->failed);
1197 }
1198
1199
1200 uint
1201 osl_dma_consistent_align(void)
1202 {
1203 return (PAGE_SIZE);
1204 }
1205
/* Allocate DMA-consistent memory.  *alloced receives the (possibly padded)
 * length, *pap the bus/physical address.  Returns the kernel VA or NULL.
 */
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* Over-allocate so the caller can round up to 'align' inside the buffer. */
	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
	/* Non-coherent path: plain kmalloc; callers do explicit cache
	 * maintenance via osl_cache_flush()/osl_cache_inv().
	 */
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	{
		dma_addr_t pap_lin;
		struct pci_dev *hwdev = osh->pdev;
		gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
		flags = GFP_ATOMIC;
#else
		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
		*pap = (dmaaddr_t)pap_lin;
	}
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
	return va;
}
1241
/* Free memory from osl_dma_alloc_consistent(); the path taken must mirror
 * the allocation path selected by the same build flags.
 */
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
	kfree(va);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}
1257
1258 dmaaddr_t BCMFASTPATH
1259 osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
1260 {
1261 int dir;
1262
1263 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1264 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1265
1266
1267
1268
1269 return (pci_map_single(osh->pdev, va, size, dir));
1270 }
1271
1272 void BCMFASTPATH
1273 osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
1274 {
1275 int dir;
1276
1277 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1278
1279
1280 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1281 pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
1282 }
1283
/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
	/* Busy-wait hint for spin loops (reduces pipeline/power pressure). */
	cpu_relax();
}
1290
1291 #if defined(mips)
/* Writeback+invalidate every L1 data-cache line covering [va, va+size). */
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
	unsigned long l = ROUNDDN((unsigned long)va, L1_CACHE_BYTES);
	unsigned long e = ROUNDUP((unsigned long)(va+size), L1_CACHE_BYTES);
	while (l < e)
	{
		flush_dcache_line(l); /* Hit_Writeback_Inv_D */
		l += L1_CACHE_BYTES; /* next cache line base */
	}
}
1303
/* Invalidate (discard) every L1 data-cache line covering [va, va+size). */
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
	unsigned long l = ROUNDDN((unsigned long)va, L1_CACHE_BYTES);
	unsigned long e = ROUNDUP((unsigned long)(va+size), L1_CACHE_BYTES);
	while (l < e)
	{
		invalidate_dcache_line(l); /* Hit_Invalidate_D */
		l += L1_CACHE_BYTES; /* next cache line base */
	}
}
1315
/* Issue a MIPS "pref" load hint for the given address. */
inline void osl_prefetch(const void *ptr)
{
	__asm__ __volatile__(".set mips4\npref %0,(%1)\n.set mips0\n"::"i" (Pref_Load), "r" (ptr));
}
1320
1321 #elif (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
1322
/* Clean the CPU cache for [va, va+size) so the device sees fresh data
 * (device-bound / TX direction).
 */
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
	if (size > 0)
		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
}
1330
/* Invalidate the CPU cache for [va, va+size) before reading device-written
 * data (RX direction).
 * NOTE(review): unlike osl_cache_flush() there is no size > 0 guard here --
 * confirm callers never pass size == 0.
 */
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
}
1337
/* Issue an ARM "pld" preload hint for the given address. */
inline void osl_prefetch(const void *ptr)
{
	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
}
1342
/* DMA on this configuration is not cache-coherent; callers must do
 * explicit cache maintenance.
 */
int osl_arch_is_coherent(void)
{
	return 0;
}
1347
1348
/* ACP (Accelerator Coherency Port) workaround is disabled here. */
inline int osl_acp_war_enab(void)
{
	return 0;
}
1353
#endif /* mips / __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
1355
1356
1357 void
1358 osl_delay(uint usec)
1359 {
1360 uint d;
1361
1362 while (usec > 0) {
1363 d = MIN(usec, 1000);
1364 udelay(d);
1365 usec -= d;
1366 }
1367 }
1368
/* Sleep for 'ms' milliseconds.  Short sleeps use the hrtimer-backed
 * usleep_range() for better precision on kernels that provide it.
 */
void
osl_sleep(uint ms)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	if (ms < 20)
		usleep_range(ms*1000, ms*1000 + 1000);
	else
#endif
	msleep(ms);
}
1379
1380
1381
/* Clone a packet.
 * The pkttag contents are NOT cloned.
 * Returns the duplicate skb, or NULL on allocation failure.
 */
void *
osl_pktdup(osl_t *osh, void *skb)
{
	void * p;

	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	/* pskb_copy() duplicates the header data so the copy is writable. */
	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
		return NULL;


	/* Clear PKTC context */
	PKTSETCLINK(p, NULL);
	PKTCCLRFLAGS(p);
	PKTCSETCNT(p, 1);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->cmn->pktalloced);

	return (p);
}
1420
1421
1422
1423 /*
1424 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1425 */
1426
1427 /*
1428 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1429 */
1430
1431 uint
1432 osl_pktalloced(osl_t *osh)
1433 {
1434 if (atomic_read(&osh->cmn->refcount) == 1)
1435 return (atomic_read(&osh->cmn->pktalloced));
1436 else
1437 return 0;
1438 }
1439
1440 uint32
1441 osl_rand(void)
1442 {
1443 uint32 rand;
1444
1445 get_random_bytes(&rand, sizeof(rand));
1446
1447 return rand;
1448 }
1449
1450 /* Linux Kernel: File Operations: start */
1451 void *
1452 osl_os_open_image(char *filename)
1453 {
1454 struct file *fp;
1455
1456 fp = filp_open(filename, O_RDONLY, 0);
1457 /*
1458 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1459 * Alternative:
1460 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1461 * ???
1462 */
1463 if (IS_ERR(fp))
1464 fp = NULL;
1465
1466 return fp;
1467 }
1468
1469 int
1470 osl_os_get_image_block(char *buf, int len, void *image)
1471 {
1472 struct file *fp = (struct file *)image;
1473 int rdlen;
1474
1475 if (!image)
1476 return 0;
1477
1478 rdlen = kernel_read(fp, fp->f_pos, buf, len);
1479 if (rdlen > 0)
1480 fp->f_pos += rdlen;
1481
1482 return rdlen;
1483 }
1484
1485 void
1486 osl_os_close_image(void *image)
1487 {
1488 if (image)
1489 filp_close((struct file *)image, NULL);
1490 }
1491
1492 int
1493 osl_os_image_size(void *image)
1494 {
1495 int len = 0, curroffset;
1496
1497 if (image) {
1498 /* store the current offset */
1499 curroffset = generic_file_llseek(image, 0, 1);
1500 /* goto end of file to get length */
1501 len = generic_file_llseek(image, 0, 2);
1502 /* restore back the offset */
1503 generic_file_llseek(image, curroffset, 0);
1504 }
1505 return len;
1506 }
1507
1508 /* Linux Kernel: File Operations: end */
1509
1510 #if (defined(STB) && defined(__arm__))
/* Read a PCIe register of 'size' bytes at 'addr' into *v, serializing with
 * the L2 cache controller lock while the ACP workaround is active.
 */
inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
{
	unsigned long flags = 0;
	int pci_access = 0;

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
		pci_access = 1;

	if (pci_access && ACP_WAR_ENAB())
		spin_lock_irqsave(&l2x0_reg_lock, flags);

	switch (size) {
	case sizeof(uint8):
		*(uint8*)v = readb((volatile uint8*)(addr));
		break;
	case sizeof(uint16):
		*(uint16*)v = readw((volatile uint16*)(addr));
		break;
	case sizeof(uint32):
		*(uint32*)v = readl((volatile uint32*)(addr));
		break;
	case sizeof(uint64):
		/* No 64-bit MMIO accessor; plain volatile dereference is used.
		 * NOTE(review): unlike readb/readw/readl this carries no barrier
		 * semantics -- confirm callers do not depend on ordering here.
		 */
		*(uint64*)v = *((volatile uint64*)(addr));
		break;
	}

	if (pci_access && ACP_WAR_ENAB())
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
1540 #endif
1541
/* APIs to set/get specific quirks in OSL layer */
/* OR the given quirk bits into osh->flags (no locking). */
void
osl_flag_set(osl_t *osh, uint32 mask)
{
	osh->flags |= mask;
}
1548
#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	/* True when any bit of 'mask' is set in osh->flags. */
	return (osh->flags & mask);
}
1558
1559 #ifdef BCM_SECURE_DMA
1560 static void *
1561 osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
1562 {
1563
1564 struct page **map;
1565 int order, i;
1566 void *addr = NULL;
1567
1568 size = PAGE_ALIGN(size);
1569 order = get_order(size);
1570
1571 map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
1572
1573 if (map == NULL)
1574 return NULL;
1575
1576 for (i = 0; i < (size >> PAGE_SHIFT); i++)
1577 map[i] = page + i;
1578
1579 if (iscache) {
1580 addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
1581 if (isdecr) {
1582 osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
1583 }
1584 }
1585 else {
1586
1587 #if defined(__ARM_ARCH_7A__)
1588 addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
1589 pgprot_noncached(__pgprot(PAGE_KERNEL)));
1590 #endif
1591 if (isdecr) {
1592 osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
1593 }
1594 }
1595
1596 kfree(map);
1597 return (void *)addr;
1598 }
1599
/* Tear down a mapping created by osl_sec_dma_ioremap(). */
static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	vunmap(contig_base_va);
}
1605
1606 static int
1607 osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
1608 {
1609 int i;
1610 int ret = BCME_OK;
1611 sec_mem_elem_t *sec_mem_elem;
1612
1613 if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
1614
1615 *list = sec_mem_elem;
1616 bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
1617 for (i = 0; i < max-1; i++) {
1618 sec_mem_elem->next = (sec_mem_elem + 1);
1619 sec_mem_elem->size = mbsize;
1620 sec_mem_elem->pa_cma = osh->contig_base_alloc;
1621 sec_mem_elem->vac = osh->contig_base_alloc_va;
1622
1623 sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1624 osh->contig_base_alloc += mbsize;
1625 osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);
1626
1627 sec_mem_elem = sec_mem_elem + 1;
1628 }
1629 sec_mem_elem->next = NULL;
1630 sec_mem_elem->size = mbsize;
1631 sec_mem_elem->pa_cma = osh->contig_base_alloc;
1632 sec_mem_elem->vac = osh->contig_base_alloc_va;
1633
1634 sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1635 osh->contig_base_alloc += mbsize;
1636 osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);
1637
1638 } else {
1639 printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
1640 ret = BCME_ERROR;
1641 }
1642 return ret;
1643 }
1644
1645
1646 static void
1647 osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
1648 {
1649 if (sec_list_base)
1650 kfree(sec_list_base);
1651 }
1652
/* Pop a free element from the 4K free list and append it to the caller's
 * per-context (ptr_cma_info) allocation list.
 * NOTE(review): when the free list is empty this dereferences NULL right
 * after the ASSERT (compiled out on release builds) -- confirm the pool is
 * sized to cover the worst-case number of in-flight mappings.
 */
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
	struct sec_cma_info *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem = NULL;

	ASSERT(osh->sec_list_4096);
	sec_mem_elem = osh->sec_list_4096;
	osh->sec_list_4096 = sec_mem_elem->next;

	sec_mem_elem->next = NULL;

	if (ptr_cma_info->sec_alloc_list_tail) {
		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	else {
		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		ptr_cma_info->sec_alloc_list = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	return sec_mem_elem;
}
1677
1678 static void BCMFASTPATH
1679 osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
1680 {
1681 sec_mem_elem->dma_handle = 0x0;
1682 sec_mem_elem->va = NULL;
1683 sec_mem_elem->next = osh->sec_list_4096;
1684 osh->sec_list_4096 = sec_mem_elem;
1685 }
1686
1687 static sec_mem_elem_t * BCMFASTPATH
1688 osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
1689 {
1690 sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1691 sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
1692
1693 if (!sec_mem_elem) {
1694 printk("osl_sec_dma_find_rem_elem ptr_cma_info->sec_alloc_list is NULL \n");
1695 return NULL;
1696 }
1697
1698 if (sec_mem_elem->dma_handle == dma_handle) {
1699
1700 ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1701
1702 if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
1703 ptr_cma_info->sec_alloc_list_tail = NULL;
1704 ASSERT(ptr_cma_info->sec_alloc_list == NULL);
1705 }
1706
1707 return sec_mem_elem;
1708 }
1709 sec_mem_elem = sec_mem_elem->next;
1710
1711 while (sec_mem_elem != NULL) {
1712
1713 if (sec_mem_elem->dma_handle == dma_handle) {
1714
1715 sec_prv_elem->next = sec_mem_elem->next;
1716 if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
1717 ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
1718
1719 return sec_mem_elem;
1720 }
1721 sec_prv_elem = sec_mem_elem;
1722 sec_mem_elem = sec_mem_elem->next;
1723 }
1724 return NULL;
1725 }
1726
1727 static sec_mem_elem_t *
1728 osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
1729 {
1730 sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1731
1732 if (sec_mem_elem) {
1733
1734 ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1735
1736 if (ptr_cma_info->sec_alloc_list == NULL)
1737 ptr_cma_info->sec_alloc_list_tail = NULL;
1738
1739 return sec_mem_elem;
1740
1741 } else
1742 return NULL;
1743 }
1744
1745 static void * BCMFASTPATH
1746 osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
1747 {
1748 return ptr_cma_info->sec_alloc_list_tail;
1749 }
1750
1751 dma_addr_t BCMFASTPATH
1752 osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
1753 hnddma_seg_map_t *dmah, void *ptr_cma_info)
1754 {
1755 sec_mem_elem_t *sec_mem_elem;
1756 struct page *pa_cma_page;
1757 uint loffset;
1758 void *vaorig = ((uint8 *)va + size);
1759 dma_addr_t dma_handle = 0x0;
1760 /* packet will be the one added with osl_sec_dma_map() just before this call */
1761
1762 sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);
1763
1764 if (sec_mem_elem && sec_mem_elem->va == vaorig) {
1765
1766 pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1767 loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
1768
1769 dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
1770 (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1771
1772 } else {
1773 printf("%s: error orig va not found va = 0x%p \n",
1774 __FUNCTION__, vaorig);
1775 }
1776 return dma_handle;
1777 }
1778
1779 dma_addr_t BCMFASTPATH
1780 osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
1781 hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
1782 {
1783
1784 sec_mem_elem_t *sec_mem_elem;
1785 struct page *pa_cma_page;
1786 void *pa_cma_kmap_va = NULL;
1787 uint buflen = 0;
1788 dma_addr_t dma_handle = 0x0;
1789 uint loffset;
1790
1791 ASSERT((direction == DMA_RX) || (direction == DMA_TX));
1792 sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
1793
1794 sec_mem_elem->va = va;
1795 sec_mem_elem->direction = direction;
1796 pa_cma_page = sec_mem_elem->pa_cma_page;
1797
1798 loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
1799 /* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
1800 * pa_cma_kmap_va += loffset;
1801 */
1802
1803 pa_cma_kmap_va = sec_mem_elem->vac;
1804 pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
1805 buflen = size;
1806
1807 if (direction == DMA_TX) {
1808 memcpy((uint8*)pa_cma_kmap_va+offset, va, size);
1809
1810 if (dmah) {
1811 dmah->nsegs = 1;
1812 dmah->origsize = buflen;
1813 }
1814 }
1815 else
1816 {
1817 if ((p != NULL) && (dmah != NULL)) {
1818 dmah->nsegs = 1;
1819 dmah->origsize = buflen;
1820 }
1821 }
1822
1823 dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
1824 (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1825
1826 if (dmah) {
1827 dmah->segs[0].addr = dma_handle;
1828 dmah->segs[0].length = buflen;
1829 }
1830 sec_mem_elem->dma_handle = dma_handle;
1831 /* kunmap_atomic(pa_cma_kmap_va-loffset); */
1832 return dma_handle;
1833 }
1834
/* Map a descriptor buffer whose VA lies inside the contiguous CMA region,
 * recovering its physical address via the delta recorded by
 * osl_sec_dma_ioremap().
 */
dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
{
	struct page *pa_cma_page;
	phys_addr_t pa_cma;
	dma_addr_t dma_handle = 0x0;
	uint loffset;

	/* NOTE(review): void* arithmetic with phys_addr_t relies on the GCC
	 * pointer-arithmetic extension; va must originate from the CMA mapping
	 * for the recorded delta to apply -- verify against callers.
	 */
	pa_cma = (phys_addr_t)(va - osh->contig_delta_va_pa);
	pa_cma_page = phys_to_page(pa_cma);
	loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));

	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));

	return dma_handle;
}
1853
/* Unmap a buffer mapped via osl_sec_dma_map().  For RX with no attached
 * packet the received data is copied from the CMA bounce buffer back into
 * the caller's buffer; the element is then returned to the free pool.
 */
void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t pa_cma;
	void *va;
	int read_count = 0;
	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	ASSERT(sec_mem_elem);

	va = sec_mem_elem->va;
	va = (uint8 *)va - offset;
	pa_cma = sec_mem_elem->pa_cma;

	pa_cma_page = sec_mem_elem->pa_cma_page;


	if (direction == DMA_RX) {

		if (p == NULL) {

			pa_cma_kmap_va = sec_mem_elem->vac;

			/* NOTE(review): this unmaps pa_cma with plain 'size' while
			 * the map side used page+loffset+offset and the TX branch
			 * below uses size+offset -- verify consistency when
			 * offset != 0.
			 */
			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
			memcpy(va, pa_cma_kmap_va, size);
		}
	} else {
		dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
	}

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}
1898
1899 void
1900 osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
1901 {
1902
1903 sec_mem_elem_t *sec_mem_elem;
1904
1905 sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1906
1907 while (sec_mem_elem != NULL) {
1908
1909 dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
1910 sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1911 osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
1912
1913 sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1914 }
1915 }
1916
1917 static void
1918 osl_sec_dma_init_consistent(osl_t *osh)
1919 {
1920 int i;
1921 void *temp_va = osh->contig_base_alloc_coherent_va;
1922 phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
1923
1924 for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1925 osh->sec_cma_coherent[i].avail = TRUE;
1926 osh->sec_cma_coherent[i].va = temp_va;
1927 osh->sec_cma_coherent[i].pa = temp_pa;
1928 temp_va += SEC_CMA_COHERENT_BLK;
1929 temp_pa += SEC_CMA_COHERENT_BLK;
1930 }
1931 }
1932
1933 static void *
1934 osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
1935 {
1936
1937 void *temp_va = NULL;
1938 ulong temp_pa = 0;
1939 int i;
1940
1941 if (size > SEC_CMA_COHERENT_BLK) {
1942 printf("%s unsupported size\n", __FUNCTION__);
1943 return NULL;
1944 }
1945
1946 for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1947 if (osh->sec_cma_coherent[i].avail == TRUE) {
1948 temp_va = osh->sec_cma_coherent[i].va;
1949 temp_pa = osh->sec_cma_coherent[i].pa;
1950 osh->sec_cma_coherent[i].avail = FALSE;
1951 break;
1952 }
1953 }
1954
1955 if (i == SEC_CMA_COHERENT_MAX)
1956 printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1957 temp_va, (ulong)temp_pa, size);
1958
1959 *pap = (unsigned long)temp_pa;
1960 return temp_va;
1961 }
1962
1963 static void
1964 osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
1965 {
1966 int i = 0;
1967
1968 for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1969 if (osh->sec_cma_coherent[i].va == va) {
1970 osh->sec_cma_coherent[i].avail = TRUE;
1971 break;
1972 }
1973 }
1974 if (i == SEC_CMA_COHERENT_MAX)
1975 printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1976 va, (ulong)pa, size);
1977 }
1978
1979 #endif /* BCM_SECURE_DMA */
1980
1981 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
1982 #include <linux/kallsyms.h>
1983 #include <net/sock.h>
/* Partially orphan a TCP skb: reduce the truesize charged against the
 * owning socket's sk_wmem_alloc so TSQ (TCP Small Queues) does not throttle
 * the flow while packets sit in the WiFi driver's queues.  Only skbs whose
 * destructor is tcp_wfree are touched.
 */
void
osl_pkt_orphan_partial(struct sk_buff *skb)
{
	uint32 fraction;
	static void *p_tcp_wfree = NULL;

	if (!skb->destructor || skb->destructor == sock_wfree)
		return;

	/* tcp_wfree is not exported to modules; resolve it once by
	 * symbolizing the destructor address and comparing the first 9
	 * characters ("tcp_wfree" is 9 chars, hence sym[9] = 0).
	 */
	if (unlikely(!p_tcp_wfree)) {
		char sym[KSYM_SYMBOL_LEN];
		sprint_symbol(sym, (unsigned long)skb->destructor);
		sym[9] = 0;
		if (!strcmp(sym, "tcp_wfree"))
			p_tcp_wfree = skb->destructor;
		else
			return;
	}

	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
		return;

	/* abstract a certain portion of skb truesize from the socket
	 * sk_wmem_alloc to allow more skb can be allocated for this
	 * socket for better cusion meeting WiFi device requirement
	 */
	fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
	skb->truesize -= fraction;
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
}
2014 #endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */