bcmdhd_1_77: Import A320F (A320FLXXU2CRE3) Oreo driver
LineageOS/android_kernel_samsung_universal7580: drivers/net/wireless/bcmdhd_1_77/linux_osl.c
/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2018, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.c 680580 2017-01-20 11:49:58Z $
 */

#define LINUX_PORT

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#if !defined(STBLINUX)
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#endif /* STBLINUX */

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <pcicfg.h>

#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <stbutils.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#endif /* BCM_SECURE_DMA */

#include <linux/fs.h>

#if defined(STB)
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;
#endif

#ifdef BCM_OBJECT_TRACE
#include <bcmutils.h>
#endif /* BCM_OBJECT_TRACE */

#define PCI_CFG_RETRY		10

#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
#define DUMPBUFSZ		1024
/* dependency check */
#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF is supported on PCIE targets only"
#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */

#ifdef CONFIG_DHD_USE_STATIC_BUF
#ifdef DHD_USE_STATIC_CTRLBUF
#define DHD_SKB_1PAGE_BUFSIZE	(PAGE_SIZE*1)
#define DHD_SKB_2PAGE_BUFSIZE	(PAGE_SIZE*2)
#define DHD_SKB_4PAGE_BUFSIZE	(PAGE_SIZE*4)

#define PREALLOC_FREE_MAGIC	0xFEDC
#define PREALLOC_USED_MAGIC	0xFCDE
#else
#define DHD_SKB_HDRSIZE		336
#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
#endif /* DHD_USE_STATIC_CTRLBUF */

#define STATIC_BUF_MAX_NUM	16
#define STATIC_BUF_SIZE		(PAGE_SIZE*2)
#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)

typedef struct bcm_static_buf {
	spinlock_t static_lock;
	unsigned char *buf_ptr;
	unsigned char buf_use[STATIC_BUF_MAX_NUM];
} bcm_static_buf_t;

static bcm_static_buf_t *bcm_static_buf = 0;

#ifdef DHD_USE_STATIC_CTRLBUF
#define STATIC_PKT_4PAGE_NUM	0
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
#elif defined(ENHANCED_STATIC_BUF)
#define STATIC_PKT_4PAGE_NUM	1
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
#else
#define STATIC_PKT_4PAGE_NUM	0
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
#endif /* DHD_USE_STATIC_CTRLBUF */

#ifdef DHD_USE_STATIC_CTRLBUF
#define STATIC_PKT_1PAGE_NUM	0
#define STATIC_PKT_2PAGE_NUM	128
#else
#define STATIC_PKT_1PAGE_NUM	8
#define STATIC_PKT_2PAGE_NUM	8
#endif /* DHD_USE_STATIC_CTRLBUF */

#define STATIC_PKT_1_2PAGE_NUM \
	((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
#define STATIC_PKT_MAX_NUM \
	((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))

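/*
 * Pool of skbs preallocated by the platform glue (wifi_platform_prealloc).
 * pkt_use[] marks which slots are handed out. Under DHD_USE_STATIC_CTRLBUF the
 * pool holds only 2-page control buffers and is protected by a spinlock;
 * otherwise it holds 1-, 2- and (optionally) 4-page buffers under a semaphore.
 */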
typedef struct bcm_static_pkt {
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
	unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
	spinlock_t osl_pkt_lock;
	uint32 last_allocated_index;
#else
	struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
#ifdef ENHANCED_STATIC_BUF
	struct sk_buff *skb_16k;
#endif /* ENHANCED_STATIC_BUF */
	struct semaphore osl_pkt_sem;
#endif /* DHD_USE_STATIC_CTRLBUF */
	unsigned char pkt_use[STATIC_PKT_MAX_NUM];
} bcm_static_pkt_t;

static bcm_static_pkt_t *bcm_static_skb = 0;

void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */

typedef struct bcm_mem_link {
	struct bcm_mem_link *prev;
	struct bcm_mem_link *next;
	uint size;
	int line;
	void *osh;
	char file[BCM_MEM_FILENAME_LEN];
} bcm_mem_link_t;

struct osl_cmn_info {
	atomic_t malloced;
	atomic_t pktalloced;	/* Number of allocated packet buffers */
	spinlock_t dbgmem_lock;
	bcm_mem_link_t *dbgmem_list;
	bcm_mem_link_t *dbgvmem_list;
	spinlock_t pktalloc_lock;
	atomic_t refcount;	/* Number of references to this shared structure. */
};
typedef struct osl_cmn_info osl_cmn_t;

struct osl_info {
	osl_pubinfo_t pub;
	uint32 flags;		/* If specific cases to be handled in the OSL */
#ifdef CTFPOOL
	ctfpool_t *ctfpool;
#endif /* CTFPOOL */
	uint magic;
	void *pdev;
	uint failed;
	uint bustype;
	osl_cmn_t *cmn;		/* Common OSL-related data shared between two OSHs */

	void *bus_handle;
#ifdef BCM_SECURE_DMA
#ifdef NOT_YET
	struct sec_mem_elem *sec_list_512;
	struct sec_mem_elem *sec_list_base_512;
	struct sec_mem_elem *sec_list_2048;
	struct sec_mem_elem *sec_list_base_2048;
#endif /* NOT_YET */
	struct sec_mem_elem *sec_list_4096;
	struct sec_mem_elem *sec_list_base_4096;
	phys_addr_t contig_base;
	void *contig_base_va;
	phys_addr_t contig_base_alloc;
	void *contig_base_alloc_va;
	phys_addr_t contig_base_alloc_coherent;
	void *contig_base_alloc_coherent_va;
	void *contig_base_coherent_va;
	void *contig_delta_va_pa;
	struct {
		phys_addr_t pa;
		void *va;
		bool avail;
	} sec_cma_coherent[SEC_CMA_COHERENT_MAX];
	int stb_ext_params;
#endif /* BCM_SECURE_DMA */
};
#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
	bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
	ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */

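/*
 * Zero the 32-byte packet tag kept in skb->cb so stale driver state never
 * leaks between successive users of a recycled skb.
 */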
#ifdef BCM_OBJECT_TRACE
/* don't clear the first 4 bytes; they hold the pkt sn */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
#else
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
#endif /* BCM_OBJECT_TRACE */

/* PCMCIA attribute space access macros */

uint32 g_assert_type = 0; /* By default, kernel panic */

module_param(g_assert_type, int, 0);
#ifdef BCM_SECURE_DMA
#define SECDMA_MODULE_PARAMS	0
#define SECDMA_EXT_FILE		1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */

static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */
	-EINVAL,		/* BCME_NOT_GC */
	-EINVAL,		/* BCME_PRS_REQ_FAILED */
	-EINVAL,		/* BCME_NO_P2P_SE */
	-EINVAL,		/* BCME_NOA_PND */
	-EINVAL,		/* BCME_FRAG_Q_FAILED */
	-EINVAL,		/* BCME_GET_AF_FAILED */
	-EINVAL,		/* BCME_MSCH_NOTREADY */

	/* When a new error code is added to bcmutils.h, add the OS-specific
	 * error translation here as well.
	 */
	/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -60
#error "You need to add an OS error translation in the linuxbcmerrormap \
for the new error code defined in bcmutils.h"
#endif
};
uint lmtest = FALSE;

/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;

	/* Array bounds covered by ASSERT in osl_attach */
	return linuxbcmerrormap[-bcmerror];
}
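/*
 * Allocate and initialize an OSL handle for the given device and bus type.
 * The shared osl_cmn_t is created on first use and reference-counted; with
 * BCM_SECURE_DMA the contiguous secure-DMA regions (descriptor and data
 * memblocks plus the 4K element list) are mapped here as well.
 */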
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	void **osl_cmn = NULL;
	osl_t *osh;
	gfp_t flags;
#ifdef BCM_SECURE_DMA
	u32 secdma_memsize;
#endif

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))
		return osh;

	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
			kfree(osh);
			return NULL;
		}
		bzero(osh->cmn, sizeof(osl_cmn_t));
		if (osl_cmn)
			*osl_cmn = osh->cmn;
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));
	} else {
		osh->cmn = *osl_cmn;
	}
	atomic_add(1, &osh->cmn->refcount);

	bcm_object_trace_init();

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->failed = 0;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA

	if ((secdma_addr != 0) && (secdma_size != 0)) {
		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
			secdma_memsize = secdma_size;
		} else if (secdma_found == 1) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
			secdma_memsize = secdma_size2;
		} else {
			printk("linux_osl.c secdma: secDMA instances %d\n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x\n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
	}
	else if (stbpriv_init(osh) == 0) {
		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
		} else if (secdma_found == 1) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
		} else {
			printk("linux_osl.c secdma: secDMA instances %d\n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x\n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_EXT_FILE;
	}
	else {
		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
		kfree(osh);
		return NULL;
	}
	secdma_found++;
	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	if (osh->contig_base_alloc_coherent_va == NULL) {
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	if (osh->contig_base_alloc_va == NULL) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_va = osh->contig_base_alloc_va;

#ifdef NOT_YET
	/*
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
	 * osh->sec_list_base_512 = osh->sec_list_512;
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
	 * osh->sec_list_base_2048 = osh->sec_list_2048;
	 */
#endif
	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

	return osh;
}

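/*
 * Hook up the preallocated static buffer and skb pools provided by the
 * platform glue (sections 3 and 4 of the prealloc area). Safe to call with a
 * NULL adapter, in which case the pools are simply left unset.
 */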
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("cannot alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		} else {
			printk("alloc static buf at %p!\n", bcm_static_buf);
		}

		spin_lock_init(&bcm_static_buf->static_lock);

		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
	}

#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
	if (!bcm_static_skb && adapter) {
		int i;
		void *skb_buff_ptr = 0;
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
		if (!skb_buff_ptr) {
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			bcm_static_skb->pkt_use[i] = 0;
		}

#ifdef DHD_USE_STATIC_CTRLBUF
		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
		bcm_static_skb->last_allocated_index = 0;
#else
		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
	}
#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	return 0;
}

void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}

void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}

void
osl_detach(osl_t *osh)
{
	if (osh == NULL)
		return;

#ifdef BCM_SECURE_DMA
	if (osh->stb_ext_params == SECDMA_EXT_FILE)
		stbpriv_exit(osh);
#ifdef NOT_YET
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
#endif /* NOT_YET */
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
	secdma_found--;
#endif /* BCM_SECURE_DMA */

	bcm_object_trace_deinit();

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	atomic_sub(1, &osh->cmn->refcount);
	if (atomic_read(&osh->cmn->refcount) == 0) {
		kfree(osh->cmn);
	}
	kfree(osh);
}

int osl_static_mem_deinit(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
#ifdef BCMSDIO
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif /* BCMSDIO */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	return 0;
}

/* APIs to set/get specific quirks in OSL layer */
void BCMFASTPATH
osl_flag_set(osl_t *osh, uint32 mask)
{
	osh->flags |= mask;
}

void
osl_flag_clr(osl_t *osh, uint32 mask)
{
	osh->flags &= ~mask;
}

#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	return (osh->flags & mask);
}

#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))

inline int BCMFASTPATH
osl_arch_is_coherent(void)
{
	return 0;
}

inline int BCMFASTPATH
osl_acp_war_enab(void)
{
	return 0;
}

inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
	if (size > 0)
		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TO_DEVICE);
}

inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
}

inline void BCMFASTPATH
osl_prefetch(const void *ptr)
{
	__asm__ __volatile__("pld\t%0" :: "o"(*(char *)ptr) : "cc");
}

#endif

/*
 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
 * DDR aliasing into non-ACP address space. Such fwder buffers must be
 * explicitly managed from a coherency perspective.
 */
static inline void BCMFASTPATH
osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
{
}

static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#ifdef DHD_USE_ATOMIC_PKTGET
	flags = GFP_ATOMIC;
#endif /* DHD_USE_ATOMIC_PKTGET */
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
	return skb;
}

#ifdef CTFPOOL

#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
/*
 * Allocate and add an object to the packet pool.
 */
void *
osl_ctfpool_add(osl_t *osh)
{
	struct sk_buff *skb;

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Allocate a new skb and add it to the ctfpool */
	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
	if (skb == NULL) {
		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
			osh->ctfpool->obj_size);
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Add to ctfpool */
	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	/* If ctfpool's osh is a fwder osh, reset the fwder buf */
	osl_fwderbuf_reset(osh->ctfpool->osh, skb);

	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	return skb;
}

/*
 * Add new objects to the pool.
 */
void
osl_ctfpool_replenish(osl_t *osh, uint thresh)
{
	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

	/* Do nothing if no refills are required */
	while ((osh->ctfpool->refills > 0) && (thresh--)) {
		osl_ctfpool_add(osh);
		osh->ctfpool->refills--;
	}
}

/*
 * Initialize the packet pool with the specified number of objects.
 */
int32
osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
{
	gfp_t flags;

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
	ASSERT(osh->ctfpool);

	osh->ctfpool->osh = osh;

	osh->ctfpool->max_obj = numobj;
	osh->ctfpool->obj_size = size;

	spin_lock_init(&osh->ctfpool->lock);

	while (numobj--) {
		if (!osl_ctfpool_add(osh))
			return -1;
		osh->ctfpool->fast_frees--;
	}

	return 0;
}

/*
 * Cleanup the packet pool objects.
 */
void
osl_ctfpool_cleanup(osl_t *osh)
{
	struct sk_buff *skb, *nskb;

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

	CTFPOOL_LOCK(osh->ctfpool, flags);

	skb = osh->ctfpool->head;

	while (skb != NULL) {
		nskb = skb->next;
		dev_kfree_skb(skb);
		skb = nskb;
		osh->ctfpool->curr_obj--;
	}

	ASSERT(osh->ctfpool->curr_obj == 0);
	osh->ctfpool->head = NULL;
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	kfree(osh->ctfpool);
	osh->ctfpool = NULL;
}

void
osl_ctfpool_stats(osl_t *osh, void *b)
{
	struct bcmstrbuf *bb;

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
#ifdef BCMSDIO
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif /* BCMSDIO */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	bb = b;

	ASSERT((osh != NULL) && (bb != NULL));

	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
		osh->ctfpool->max_obj, osh->ctfpool->obj_size,
		osh->ctfpool->curr_obj, osh->ctfpool->refills);
	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
		osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
		osh->ctfpool->slow_allocs);
}

static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
	struct sk_buff *skb;

	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	if (len > osh->ctfpool->obj_size) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	ASSERT(len <= osh->ctfpool->obj_size);

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Init skb struct */
	skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
	skb->data = skb->head + NET_SKB_PAD;
	skb->tail = skb->head + NET_SKB_PAD;
#else
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;
#endif /* __ARM_ARCH_7A__ */
	skb->len = 0;
	skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	skb->list = NULL;
#endif
	atomic_set(&skb->users, 1);

	PKTSETCLINK(skb, NULL);
	PKTCCLRATTR(skb);
	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);

	return skb;
}
#endif /* CTFPOOL */

/* Convert a driver packet to a native (OS) packet.
 * In the process, the pkttag is zeroed out before sending up.
 * IP code depends on skb->cb being set up correctly with various options;
 * in our case, that means it should be 0.
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
	struct sk_buff *nskb;

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Decrement the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
	}
	return (struct sk_buff *)pkt;
}

/* Convert a native (OS) packet to a driver packet.
 * In the process, the native packet is destroyed; there is no copying.
 * Also, the pkttag is zeroed out.
 */
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
{
	struct sk_buff *cskb;
	struct sk_buff *nskb;
	unsigned long pktalloced = 0;

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* walk the PKTCLINK() list */
	for (cskb = (struct sk_buff *)pkt;
	     cskb != NULL;
	     cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {

		/* walk the pkt buffer list */
		for (nskb = cskb; nskb; nskb = nskb->next) {

			/* Increment the packet counter */
			pktalloced++;

			/* clean the 'prev' pointer
			 * Kernel 3.18 is leaving skb->prev pointer set to skb
			 * to indicate a non-fragmented skb
			 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
			nskb->prev = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
		}
	}

	/* Increment the packet counter */
	atomic_add(pktalloced, &osh->cmn->pktalloced);

	return (void *)pkt;
}

/* Return a new packet; zero out the pkttag */
#ifdef BCM_OBJECT_TRACE
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len, int line, const char *caller)
#else
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb;
	uchar num = 0;
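	/* Low-memory test hook: when lmtest is set to a percentage, fail
	 * roughly that fraction of allocations at random.
	 */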
	if (lmtest != FALSE) {
		get_random_bytes(&num, sizeof(uchar));
		if ((num + 1) <= (256 * lmtest / 100))
			return NULL;
	}

#ifdef CTFPOOL
	/* Allocate from local pool */
	skb = osl_pktfastget(osh, len);
	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
#else /* CTFPOOL */
	if ((skb = osl_alloc_skb(osh, len))) {
#endif /* CTFPOOL */
		skb->tail += len;
		skb->len += len;
		skb->priority = 0;

		atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */
	}

	return ((void*) skb);
}

#ifdef CTFPOOL
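/*
 * Return a ctfpool skb to the pool instead of freeing it back to the kernel:
 * reset the fields the driver may have touched, then push the skb onto the
 * pool's free list under the pool lock.
 */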
static inline void
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
{
	ctfpool_t *ctfpool;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
#else
	skb->stamp.tv_sec = 0;
#endif

	/* We only need to init the fields that we change */
	skb->dev = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	skb->dst = NULL;
#endif
	OSL_PKTTAG_CLEAR(skb);
	skb->ip_summed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	skb_orphan(skb);
#else
	skb->destructor = NULL;
#endif

	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);

	/* if osh is a fwder osh, reset the fwder buf */
	osl_fwderbuf_reset(ctfpool->osh, skb);

	/* Add object to the ctfpool */
	CTFPOOL_LOCK(ctfpool, flags);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;
	ctfpool->curr_obj++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	CTFPOOL_UNLOCK(ctfpool, flags);
}
#endif /* CTFPOOL */

/* Free the driver packet. Free the tag if present */
#ifdef BCM_OBJECT_TRACE
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}

	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in use\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb)) {
			if (atomic_read(&skb->users) == 1)
				smp_rmb();
			else if (!atomic_dec_and_test(&skb->users))
				goto next_skb;
			osl_pktfastfree(osh, skb);
		} else
#endif
		{
			dev_kfree_skb_any(skb);
		}
#ifdef CTFPOOL
next_skb:
#endif
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
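/*
 * Hand out a preallocated skb from the static pool. Falls back to a normal
 * osl_pktget() when the pool is absent or (without DHD_USE_STATIC_CTRLBUF)
 * when every slot is in use; the CTRLBUF variant returns NULL on exhaustion.
 */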
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;
#ifdef DHD_USE_STATIC_CTRLBUF
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!bcm_static_skb)
		return osl_pktget(osh, len);

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return osl_pktget(osh, len);
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		uint32 index;
		for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
			index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
			bcm_static_skb->last_allocated_index++;
			if (bcm_static_skb->skb_8k[index] &&
				bcm_static_skb->pkt_use[index] == 0) {
				break;
			}
		}

		if ((i != STATIC_PKT_2PAGE_NUM) &&
			(index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) {
			bcm_static_skb->pkt_use[index] = 1;
			skb = bcm_static_skb->skb_8k[index];
			skb->data = skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, NET_SKB_PAD);
#else
			skb->tail = skb->data + NET_SKB_PAD;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->data += NET_SKB_PAD;
			skb->cloned = 0;
			skb->priority = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;
			skb->mac_len = PREALLOC_USED_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return skb;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: all static pkts in use!\n", __FUNCTION__);
	return NULL;
#else
	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
			if (bcm_static_skb->skb_4k[i] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if (i != STATIC_PKT_1PAGE_NUM) {
			bcm_static_skb->pkt_use[i] = 1;

			skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
			if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	if (bcm_static_skb->skb_16k &&
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;

		skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb_set_tail_pointer(skb, len);
#else
		skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
		skb->len = len;

		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif /* ENHANCED_STATIC_BUF */

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkts in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
#endif /* DHD_USE_STATIC_CTRLBUF */
}

void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb = (struct sk_buff *)p;
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!p) {
		return;
	}

	if (!bcm_static_skb) {
		osl_pktfree(osh, p, send);
		return;
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			if (bcm_static_skb->pkt_use[i] == 0) {
				printk("%s: static pkt idx %d(%p) is double freed\n",
					__FUNCTION__, i, p);
			} else {
				bcm_static_skb->pkt_use[i] = 0;
			}

			if (skb->mac_len != PREALLOC_USED_MAGIC) {
				printk("%s: static pkt idx %d(%p) is not in use\n",
					__FUNCTION__, i, p);
			}

			skb->mac_len = PREALLOC_FREE_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
	down(&bcm_static_skb->osl_pkt_sem);
	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif
	up(&bcm_static_skb->osl_pkt_sem);
#endif /* DHD_USE_STATIC_CTRLBUF */
	osl_pktfree(osh, p, send);
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */

uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
	uint val = 0;
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	ASSERT(size == 4);

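	/* A config read of all 1s typically means the device did not respond
	 * (e.g. it is still waking up), so retry up to PCI_CFG_RETRY times.
	 */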
	do {
		pci_read_config_dword(osh->pdev, offset, &val);
		if (val != 0xffffffff)
			break;
	} while (retry--);

	return (val);
}

void
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	ASSERT(size == 4);

	do {
		pci_write_config_dword(osh->pdev, offset, val);
		if (offset != PCI_BAR0_WIN)
			break;
		if (osl_pci_read_config(osh, offset, size) == val)
			break;
	} while (retry--);
}

/* return bus # for the pci device pointed to by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif
}

/* return slot # for the pci device pointed to by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif
}

/* return domain # for the pci device pointed to by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}

/* return bus # for the pci device pointed to by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}

/* return the pci device pointed to by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}

static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}

void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}

void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}

void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		unsigned long irq_flags;
		int i = 0;
		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
		{
			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
				printk("all static buffers in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

			bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE*i, size);
			if (osh)
				atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE*i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}

void *
osl_mallocz(osl_t *osh, uint size)
{
	void *ptr;

	ptr = osl_malloc(osh, size);

	if (ptr != NULL) {
		bzero(ptr, size);
	}

	return ptr;
}

void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	unsigned long flags;

	if (bcm_static_buf)
	{
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
			bcm_static_buf->buf_use[buf_idx] = 0;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

			if (osh && osh->cmn) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->cmn->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	kfree(addr);
}

void *
osl_vmalloc(osl_t *osh, uint size)
{
	void *addr;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
	if ((addr = vmalloc(size)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}

void *
osl_vmallocz(osl_t *osh, uint size)
{
	void *ptr;

	ptr = osl_vmalloc(osh, size);

	if (ptr != NULL) {
		bzero(ptr, size);
	}

	return ptr;
}

void
osl_vmfree(osl_t *osh, void *addr, uint size)
{
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	vfree(addr);
}

uint
osl_check_memleak(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->malloced));
	else
		return 0;
}

uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));
}

uint
osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}

uint
osl_dma_consistent_align(void)
{
	return (PAGE_SIZE);
}

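/*
 * Allocate DMA-consistent memory. If the platform's consistent allocation is
 * not already aligned to the requested boundary, the size is padded by one
 * alignment unit so the caller can align the returned region itself.
 */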
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	{
		dma_addr_t pap_lin;
		struct pci_dev *hwdev = osh->pdev;
		gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
		flags = GFP_ATOMIC;
#else
		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
		*pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
	}
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
	return va;
}

void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
	kfree(va);
#else
#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_free_consistent(osh->pdev, size, va, paddr);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}

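/*
 * Map a buffer for streaming DMA and return its bus address as a dmaaddr_t
 * (lo/hi pair). On mapping failure the returned address is zeroed, which
 * callers must treat as an error.
 */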
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;
	dmaaddr_t ret_addr;
	dma_addr_t map_addr;
	int ret;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

	map_addr = pci_map_single(osh->pdev, va, size, dir);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	ret = pci_dma_mapping_error(osh->pdev, map_addr);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
	ret = pci_dma_mapping_error(map_addr);
#else
	ret = 0;
#endif
	if (ret) {
		printk("%s: Failed to map memory\n", __FUNCTION__);
		PHYSADDRLOSET(ret_addr, 0);
		PHYSADDRHISET(ret_addr, 0);
	} else {
		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
	}

	return ret_addr;
}

void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
	int dir;
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_unmap_single(osh->pdev, paddr, size, dir);
#else
	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* BCMDMA64OSL */
}

/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
	cpu_relax();
}

extern void osl_preempt_disable(osl_t *osh)
{
	preempt_disable();
}

extern void osl_preempt_enable(osl_t *osh)
{
	preempt_enable();
}

#if defined(BCMASSERT_LOG)
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
#endif /* BCMASSERT_LOG */

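	/* g_assert_type selects the failure policy:
	 * 0 = kernel panic (default), 1 and 3 = log only, 2 = log then BUG().
	 */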
	switch (g_assert_type) {
		case 0:
			panic("%s", tempbuf);
			break;
		case 1:
			/* fall through */
		case 3:
			printk("%s", tempbuf);
			break;
		case 2:
			printk("%s", tempbuf);
			BUG();
			break;
		default:
			break;
	}
}
#endif

void
osl_delay(uint usec)
{
	uint d;

	while (usec > 0) {
		d = MIN(usec, 1000);
		udelay(d);
		usec -= d;
	}
}

void
osl_sleep(uint ms)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	if (ms < 20)
		usleep_range(ms*1000, ms*1000 + 1000);
	else
#endif
		msleep(ms);
}

uint64
osl_sysuptime_us(void)
{
	struct timeval tv;
	uint64 usec;

	do_gettimeofday(&tv);
	/* tv_usec content is fraction of a second */
	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
	return usec;
}

/* Clone a packet.
 * The pkttag contents are NOT cloned.
 */
#ifdef BCM_OBJECT_TRACE
void *
osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
#else
void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCM_OBJECT_TRACE */
{
	void * p;

	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;

		/* If a buffer allocated from the ctfpool is cloned, we can't
		 * be sure when it will be freed. Since there is a chance that
		 * we will be losing a buffer from our pool, increment the
		 * refill count so the object is allocated again later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif /* CTFPOOL */

	/* Clear PKTC context */
	PKTSETCLINK(p, NULL);
	PKTCCLRFLAGS(p);
	PKTCSETCNT(p, 1);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
	bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */

	return (p);
}

/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
 */

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint
osl_pktalloced(osl_t *osh)
{
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->pktalloced));
	else
		return 0;
}

uint32
osl_rand(void)
{
	uint32 rand;

	get_random_bytes(&rand, sizeof(rand));

	return rand;
}

/* Linux Kernel: File Operations: start */
void *
osl_os_open_image(char *filename)
{
	struct file *fp;

	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * Alternative:
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 * ???
	 */
	if (IS_ERR(fp))
		fp = NULL;

	return fp;
}

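/*
 * Read up to len bytes from the image file into buf, advancing the stored
 * file offset by the number of bytes actually read.
 */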
int
osl_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;

	if (!image)
		return 0;

	rdlen = kernel_read(fp, fp->f_pos, buf, len);
	if (rdlen > 0)
		fp->f_pos += rdlen;

	return rdlen;
}

void
osl_os_close_image(void *image)
{
	if (image)
		filp_close((struct file *)image, NULL);
}

int
osl_os_image_size(void *image)
{
	int len = 0, curroffset;

	if (image) {
		/* store the current offset */
		curroffset = generic_file_llseek(image, 0, 1);
		/* goto end of file to get length */
		len = generic_file_llseek(image, 0, 2);
		/* restore back the offset */
		generic_file_llseek(image, curroffset, 0);
	}
	return len;
}

/* Linux Kernel: File Operations: end */

#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
{
	unsigned long flags = 0;
	int pci_access = 0;
	int acp_war_enab = ACP_WAR_ENAB();

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
		pci_access = 1;

	if (pci_access && acp_war_enab)
		spin_lock_irqsave(&l2x0_reg_lock, flags);

	switch (size) {
		case sizeof(uint8):
			*(uint8*)v = readb((volatile uint8*)(addr));
			break;
		case sizeof(uint16):
			*(uint16*)v = readw((volatile uint16*)(addr));
			break;
		case sizeof(uint32):
			*(uint32*)v = readl((volatile uint32*)(addr));
			break;
		case sizeof(uint64):
			*(uint64*)v = *((volatile uint64*)(addr));
			break;
	}

	if (pci_access && acp_war_enab)
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif

#ifdef BCM_SECURE_DMA
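/*
 * Map a physically contiguous secure-DMA region into kernel virtual address
 * space by building a page array and vmap()ing it, either cached or
 * non-cached; optionally record the VA-to-PA delta for later translation.
 */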
2058 static void *
2059 osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
2060 {
2061
2062 struct page **map;
2063 int order, i;
2064 void *addr = NULL;
2065
2066 size = PAGE_ALIGN(size);
2067 order = get_order(size);
2068
2069 map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
2070
2071 if (map == NULL)
2072 return NULL;
2073
2074 for (i = 0; i < (size >> PAGE_SHIFT); i++)
2075 map[i] = page + i;
2076
2077 if (iscache) {
2078 addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
2079 if (isdecr) {
2080 osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
2081 }
2082 } else {
2083
2084 #if defined(__ARM_ARCH_7A__)
2085 addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
2086 pgprot_noncached(__pgprot(PAGE_KERNEL)));
2087 #endif
2088 if (isdecr) {
2089 osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
2090 }
2091 }
2092
2093 kfree(map);
2094 return (void *)addr;
2095 }

static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	vunmap(contig_base_va);
}

static int
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
	int i;
	int ret = BCME_OK;
	sec_mem_elem_t *sec_mem_elem;

	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {

		*list = sec_mem_elem;
		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
		/* carve the contiguous region into 'max' fixed-size elements,
		 * linking the first max-1 into a singly linked free list
		 */
		for (i = 0; i < max-1; i++) {
			sec_mem_elem->next = (sec_mem_elem + 1);
			sec_mem_elem->size = mbsize;
			sec_mem_elem->pa_cma = osh->contig_base_alloc;
			sec_mem_elem->vac = osh->contig_base_alloc_va;

			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
			osh->contig_base_alloc += mbsize;
			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

			sec_mem_elem = sec_mem_elem + 1;
		}
		/* the last element terminates the free list */
		sec_mem_elem->next = NULL;
		sec_mem_elem->size = mbsize;
		sec_mem_elem->pa_cma = osh->contig_base_alloc;
		sec_mem_elem->vac = osh->contig_base_alloc_va;

		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		osh->contig_base_alloc += mbsize;
		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

	} else {
		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
		ret = BCME_ERROR;
	}
	return ret;
}

static void
osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
	if (sec_list_base)
		kfree(sec_list_base);
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
	struct sec_cma_info *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem = NULL;

#ifdef NOT_YET
	if (size <= 512 && osh->sec_list_512) {
		sec_mem_elem = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem->next;
	}
	else if (size <= 2048 && osh->sec_list_2048) {
		sec_mem_elem = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem->next;
	}
	else
#else
	/* pop an element off the 4KB free list... */
	ASSERT(osh->sec_list_4096);
	sec_mem_elem = osh->sec_list_4096;
	osh->sec_list_4096 = sec_mem_elem->next;
#endif /* NOT_YET */

	sec_mem_elem->next = NULL;

	/* ...and append it to the caller's in-use FIFO */
	if (ptr_cma_info->sec_alloc_list_tail) {
		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	else {
		/* First allocation: if the tail is NULL, sec_alloc_list MUST also be NULL */
		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		ptr_cma_info->sec_alloc_list = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	return sec_mem_elem;
}

static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
	sec_mem_elem->dma_handle = 0x0;
	sec_mem_elem->va = NULL;
#ifdef NOT_YET
	if (sec_mem_elem->size == 512) {
		sec_mem_elem->next = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem;
	} else if (sec_mem_elem->size == 2048) {
		sec_mem_elem->next = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem;
	} else if (sec_mem_elem->size == 4096) {
#endif /* NOT_YET */
	/* push the element back onto the 4KB free list */
	sec_mem_elem->next = osh->sec_list_4096;
	osh->sec_list_4096 = sec_mem_elem;
#ifdef NOT_YET
	}
	else
		printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
#endif /* NOT_YET */
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;

	/* guard against an empty in-use list (assumed fix: the original
	 * dereferenced the head unconditionally)
	 */
	if (sec_mem_elem == NULL)
		return NULL;

	if (sec_mem_elem->dma_handle == dma_handle) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
			ptr_cma_info->sec_alloc_list_tail = NULL;
			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		}

		return sec_mem_elem;
	}
	sec_mem_elem = sec_mem_elem->next;

	while (sec_mem_elem != NULL) {

		if (sec_mem_elem->dma_handle == dma_handle) {

			sec_prv_elem->next = sec_mem_elem->next;
			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;

			return sec_mem_elem;
		}
		sec_prv_elem = sec_mem_elem;
		sec_mem_elem = sec_mem_elem->next;
	}
	return NULL;
}

static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;

	if (sec_mem_elem) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (ptr_cma_info->sec_alloc_list == NULL)
			ptr_cma_info->sec_alloc_list_tail = NULL;

		return sec_mem_elem;

	} else
		return NULL;
}

static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	return ptr_cma_info->sec_alloc_list_tail;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	uint loffset;
	void *vaorig = ((uint8 *)va + size);
	dma_addr_t dma_handle = 0x0;
	/* the packet must be the one added by osl_sec_dma_map() just before this call */

	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

	if (sec_mem_elem && sec_mem_elem->va == vaorig) {

		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
			(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

	} else {
		printf("%s: error orig va not found va = 0x%p\n",
			__FUNCTION__, vaorig);
	}
	return dma_handle;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t dma_handle = 0x0;
	uint loffset;
#ifdef NOT_YET
	int *fragva;
	struct sk_buff *skb;
	int i = 0;
#endif /* NOT_YET */

	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);

	sec_mem_elem->va = va;
	sec_mem_elem->direction = direction;
	pa_cma_page = sec_mem_elem->pa_cma_page;

	loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
	 * pa_cma_kmap_va += loffset;
	 */

	/* 'offset' is applied to the staging VA exactly once, here */
	pa_cma_kmap_va = sec_mem_elem->vac;
	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
	buflen = size;

	if (direction == DMA_TX) {
		/* bounce the caller's data into the CMA staging buffer.
		 * (The original copied to pa_cma_kmap_va+offset, applying the
		 * offset a second time and disagreeing with the
		 * dma_map_page() offset below; assumed fix.)
		 */
		memcpy(pa_cma_kmap_va, va, size);

#ifdef NOT_YET
		if (p == NULL) {

			memcpy(pa_cma_kmap_va, va, size);
			/* prhex("Txpkt", pa_cma_kmap_va, size); */
		} else {
			for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
				if (skb_is_nonlinear(skb)) {

					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						fragva = kmap_atomic(skb_frag_page(f));
						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
						memcpy((pa_cma_kmap_va),
							(fragva + f->page_offset), skb_frag_size(f));
						kunmap_atomic(fragva);
						buflen += skb_frag_size(f);
					}
				} else {

					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy(pa_cma_kmap_va, skb->data, skb->len);
					buflen += skb->len;
				}
			}

		}
#endif /* NOT_YET */
		if (dmah) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
	}
	else
	{
		if ((p != NULL) && (dmah != NULL)) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
		/* clear the length word that the RX unmap path polls */
		*(uint32 *)(pa_cma_kmap_va) = 0x0;
	}

	if (direction == DMA_RX) {
		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
	}
	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset + offset, buflen,
		(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
	if (dmah) {
		dmah->segs[0].addr = dma_handle;
		dmah->segs[0].length = buflen;
	}
	sec_mem_elem->dma_handle = dma_handle;
	/* kunmap_atomic(pa_cma_kmap_va - loffset); */
	return dma_handle;
}
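
/*
 * Usage sketch (illustrative): the TX half of the secure-DMA bounce
 * scheme above. The caller's buffer is copied into a CMA staging element
 * and the staging page, not the original buffer, is DMA-mapped; the
 * returned handle is later passed to osl_sec_dma_unmap(). The wrapper
 * and its arguments are hypothetical, and the block is compiled out.
 */
#ifdef OSL_SEC_DMA_EXAMPLE
static dma_addr_t
osl_example_sec_tx(osl_t *osh, void *buf, uint len, struct sec_cma_info *ci)
{
	dma_addr_t h = osl_sec_dma_map(osh, buf, len, DMA_TX, NULL, NULL, ci, 0);

	/* ... the device consumes the staging buffer here ... */
	osl_sec_dma_unmap(osh, h, len, DMA_TX, NULL, NULL, ci, 0);
	return h;
}
#endif /* OSL_SEC_DMA_EXAMPLE */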

dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
{
	struct page *pa_cma_page;
	phys_addr_t pa_cma;
	dma_addr_t dma_handle = 0x0;
	uint loffset;

	/* translate the descriptor VA back to its physical address using
	 * the delta recorded by osl_sec_dma_ioremap()
	 */
	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
	pa_cma_page = phys_to_page(pa_cma);
	loffset = pa_cma - (pa_cma & ~(PAGE_SIZE-1));

	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
		(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

	return dma_handle;
}

void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
#ifdef NOT_YET
	struct page *pa_cma_page;
#endif
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t pa_cma;
	void *va;
	int read_count = 0;
	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	ASSERT(sec_mem_elem);

	va = sec_mem_elem->va;
	va = (uint8 *)va - offset;
	pa_cma = sec_mem_elem->pa_cma;

#ifdef NOT_YET
	pa_cma_page = sec_mem_elem->pa_cma_page;
#endif

	if (direction == DMA_RX) {

		if (p == NULL) {

			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
			 * pa_cma_kmap_va += loffset;
			 */

			pa_cma_kmap_va = sec_mem_elem->vac;

			/* poll the length word written by the device; give up
			 * after ~200 us if it stays zero
			 */
			do {
				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

				buflen = *(uint *)(pa_cma_kmap_va);
				if (buflen)
					break;

				OSL_DELAY(1);
				read_count++;
			} while (read_count < 200);
			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
			/* bounce the received data back into the caller's buffer */
			memcpy(va, pa_cma_kmap_va, size);
			/* kunmap_atomic(pa_cma_kmap_va); */
		}
#ifdef NOT_YET
		else {
			buflen = 0;
			for (skb = (struct sk_buff *)p; (buflen < size) &&
				(skb != NULL); skb = skb->next) {
				if (skb_is_nonlinear(skb)) {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					for (i = 0; (buflen < size) &&
						(i < skb_shinfo(skb)->nr_frags); i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						cpuaddr = kmap_atomic(skb_frag_page(f));
						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
						memcpy((cpuaddr + f->page_offset),
							pa_cma_kmap_va, skb_frag_size(f));
						kunmap_atomic(cpuaddr);
						buflen += skb_frag_size(f);
					}
					kunmap_atomic(pa_cma_kmap_va);
				} else {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy(skb->data, pa_cma_kmap_va, skb->len);
					kunmap_atomic(pa_cma_kmap_va);
					buflen += skb->len;
				}
			}
		}
#endif /* NOT_YET */
	} else {
		dma_unmap_page(OSH_NULL, pa_cma, size + offset, DMA_TO_DEVICE);
	}

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}

void
osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;

	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);

	/* drain the caller's entire in-use list */
	while (sec_mem_elem != NULL) {

		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);

		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
	}
}

static void
osl_sec_dma_init_consistent(osl_t *osh)
{
	int i;
	void *temp_va = osh->contig_base_alloc_coherent_va;
	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;

	/* carve the coherent region into SEC_CMA_COHERENT_MAX fixed-size blocks */
	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		osh->sec_cma_coherent[i].avail = TRUE;
		osh->sec_cma_coherent[i].va = temp_va;
		osh->sec_cma_coherent[i].pa = temp_pa;
		temp_va = ((uint8 *)temp_va) + SEC_CMA_COHERENT_BLK;
		temp_pa += SEC_CMA_COHERENT_BLK;
	}
}

static void *
osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
{
	void *temp_va = NULL;
	ulong temp_pa = 0;
	int i;

	if (size > SEC_CMA_COHERENT_BLK) {
		printf("%s unsupported size\n", __FUNCTION__);
		return NULL;
	}

	/* first-fit scan of the fixed-size coherent block pool */
	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].avail == TRUE) {
			temp_va = osh->sec_cma_coherent[i].va;
			temp_pa = osh->sec_cma_coherent[i].pa;
			osh->sec_cma_coherent[i].avail = FALSE;
			break;
		}
	}

	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			temp_va, (ulong)temp_pa, size);

	*pap = (unsigned long)temp_pa;
	return temp_va;
}

static void
osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
	int i = 0;

	/* return the block to the pool by matching its VA */
	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].va == va) {
			osh->sec_cma_coherent[i].avail = TRUE;
			break;
		}
	}
	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			va, (ulong)pa, size);
}
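
/*
 * Usage sketch (illustrative): the routines above implement a trivial
 * fixed-size block pool; allocation is a first-fit scan and freeing
 * matches by VA. The wrapper below is hypothetical and compiled out;
 * 'pap' receives the block's physical address.
 */
#ifdef OSL_SEC_DMA_EXAMPLE
static void *
osl_example_coherent_get(osl_t *osh, uint size, ulong *pap)
{
	/* returns NULL if size > SEC_CMA_COHERENT_BLK or the pool is empty */
	return osl_sec_dma_alloc_consistent(osh, size, 0, pap);
}
#endif /* OSL_SEC_DMA_EXAMPLE */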

#endif /* BCM_SECURE_DMA */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
#include <linux/kallsyms.h>
#include <net/sock.h>
void
osl_pkt_orphan_partial(struct sk_buff *skb)
{
	uint32 fraction;
	static void *p_tcp_wfree = NULL;

	if (!skb->destructor || skb->destructor == sock_wfree)
		return;

	/* resolve tcp_wfree() once by symbol name, since it is not exported */
	if (unlikely(!p_tcp_wfree)) {
		char sym[KSYM_SYMBOL_LEN];
		sprint_symbol(sym, (unsigned long)skb->destructor);
		sym[9] = 0;	/* compare only strlen("tcp_wfree") chars */
		if (!strcmp(sym, "tcp_wfree"))
			p_tcp_wfree = skb->destructor;
		else
			return;
	}

	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
		return;

	/* subtract a portion of the skb's truesize from the socket's
	 * sk_wmem_alloc so that more skbs can be queued for this socket,
	 * giving TCP small queues enough cushion for the WiFi device
	 */
	fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
	skb->truesize -= fraction;
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
}
#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
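
/*
 * Worked example (illustrative): with TSQ_MULTIPLIER == 5 and an skb
 * truesize of 2048 bytes, fraction = 2048 * 4 / 5 = 1638, so about 80%
 * of the skb's charge is released from the socket's write budget early.
 */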

/* timer APIs */
/* Note: all timer APIs are thread-unsafe and must be protected with locks
 * by the caller; a usage sketch follows osl_timer_del() below
 */

osl_timer_t *
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
{
	osl_timer_t *t;
	BCM_REFERENCE(fn);
	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
		printk(KERN_ERR "osl_timer_init: malloc failed for osl_timer_t\n");
		return (NULL);
	}
	/* MALLOCZ() already zeroes the allocation; the original bzero() was redundant */
	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
		printk(KERN_ERR "osl_timer_init: malloc failed for timer_list\n");
		MFREE(NULL, t, sizeof(osl_timer_t));
		return (NULL);
	}
	t->timer->data = (ulong)arg;
	t->timer->function = (linux_timer_fn)fn;
	/* leave 'set' FALSE: the timer is armed by osl_timer_add() or
	 * osl_timer_update(). (The original set it to TRUE here, which
	 * contradicted the ASSERT(!t->set) in osl_timer_add(); assumed fix.)
	 */
	t->set = FALSE;

	init_timer(t->timer);

	return (t);
}

void
osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return;
	}
	ASSERT(!t->set);

	t->set = TRUE;
	if (periodic) {
		printf("Periodic timers are not supported by the Linux timer API; "
			"the callback must re-arm the timer itself\n");
	}
	/* msecs_to_jiffies() avoids the overflow risk of ms*HZ/1000 */
	t->timer->expires = jiffies + msecs_to_jiffies(ms);

	add_timer(t->timer);

	return;
}

void
osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return;
	}
	if (periodic) {
		printf("Periodic timers are not supported by the Linux timer API; "
			"the callback must re-arm the timer itself\n");
	}
	t->set = TRUE;
	t->timer->expires = jiffies + msecs_to_jiffies(ms);

	mod_timer(t->timer, t->timer->expires);

	return;
}

/*
 * Delete and free a timer. Returns TRUE on success, FALSE only when the
 * handle is NULL. (The original comment promised FALSE for a still-pending
 * timer, which the code never did; the handle is always freed here.)
 */
bool
osl_timer_del(osl_t *osh, osl_timer_t *t)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return (FALSE);
	}
	if (t->set) {
		t->set = FALSE;
		if (t->timer) {
			del_timer(t->timer);
		}
	}
	/* free unconditionally so a timer that was never armed does not leak */
	if (t->timer) {
		MFREE(NULL, t->timer, sizeof(struct timer_list));
	}
	MFREE(NULL, t, sizeof(osl_timer_t));
	return (TRUE);
}
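
/*
 * Usage sketch (illustrative, not part of the original driver): a typical
 * one-shot timer lifecycle with the APIs above. Names are hypothetical
 * and the block is compiled out; the caller is responsible for locking,
 * per the note preceding osl_timer_init().
 */
#ifdef OSL_TIMER_EXAMPLE
static void
osl_example_timeout(void *arg)
{
	printf("osl timer fired, arg = 0x%p\n", arg);
}

static void
osl_example_timer_demo(osl_t *osh)
{
	osl_timer_t *t = osl_timer_init(osh, "example", osl_example_timeout, NULL);

	if (t == NULL)
		return;
	osl_timer_add(osh, t, 100, FALSE);	/* fire once in ~100 ms */
	/* ... later: tear down; osl_timer_del() also frees the handle */
	osl_timer_del(osh, t);
}
#endif /* OSL_TIMER_EXAMPLE */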